Oct 11 06:53:35 crc systemd[1]: Starting Kubernetes Kubelet...
Oct 11 06:53:35 crc restorecon[4807]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Oct 11 06:53:35 crc restorecon[4807]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc 
restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 11 06:53:35 crc 
restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 11 
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 
06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 11 06:53:35 crc restorecon[4807]: 
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:35 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 11 06:53:36 crc restorecon[4807]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Oct 11 06:53:36 crc kubenswrapper[5055]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.710490    5055 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721095    5055 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721147    5055 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721155    5055 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721160    5055 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721164    5055 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721168    5055 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721172    5055 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721177    5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721181    5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721185    5055 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721189    5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721193    5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721197    5055 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721202    5055 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721206    5055 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721210    5055 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721214    5055 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721219    5055 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721224    5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721230    5055 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721235    5055 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721235 5055 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721241 5055 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721246 5055 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721252 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721257 5055 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721262 5055 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721266 5055 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721270 5055 feature_gate.go:330] unrecognized feature gate: GatewayAPI Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721274 5055 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721280 5055 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721285 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721289 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721294 5055 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721298 5055 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721302 5055 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721306 5055 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721310 5055 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721314 5055 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721318 5055 feature_gate.go:330] unrecognized feature gate: OVNObservability Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721322 5055 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721327 5055 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721331 5055 feature_gate.go:330] unrecognized feature gate: PlatformOperators Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721336 5055 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
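[Editor's sketch, not part of the captured log.] The deprecated-flag warnings above all point at the same migration: those settings belong in the file passed via --config (here /etc/kubernetes/kubelet.conf, per the FLAG dump below). A minimal, hypothetical fragment of the equivalent KubeletConfiguration, assuming the upstream kubelet.config.k8s.io/v1beta1 schema and reusing only values that appear in this log, not the node's actual config file:

    # Hypothetical sketch; field names from the v1beta1 KubeletConfiguration API,
    # values copied from the FLAG dump later in this log.
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    containerRuntimeEndpoint: /var/run/crio/crio.sock            # replaces --container-runtime-endpoint
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec # replaces --volume-plugin-dir
    registerWithTaints:                                          # replaces --register-with-taints
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    systemReserved:                                              # replaces --system-reserved
      cpu: 200m
      ephemeral-storage: 350Mi
      memory: 350Mi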
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721341 5055 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721346 5055 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721350 5055 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721355 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721359 5055 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721363 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721370 5055 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721374 5055 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721379 5055 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721384 5055 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721389 5055 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721393 5055 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721397 5055 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721401 5055 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721405 5055 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721408 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721412 5055 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721416 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721422 5055 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721427 5055 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721432 5055 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721437 5055 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721441 5055 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721445 5055 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721449 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721452 5055 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721456 5055 feature_gate.go:330] unrecognized feature gate: Example
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.721460 5055 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722259 5055 flags.go:64] FLAG: --address="0.0.0.0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722276 5055 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722287 5055 flags.go:64] FLAG: --anonymous-auth="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722293 5055 flags.go:64] FLAG: --application-metrics-count-limit="100"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722301 5055 flags.go:64] FLAG: --authentication-token-webhook="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722305 5055 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722311 5055 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722318 5055 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722323 5055 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722327 5055 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722332 5055 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722339 5055 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722344 5055 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722352 5055 flags.go:64] FLAG: --cgroup-root=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722357 5055 flags.go:64] FLAG: --cgroups-per-qos="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722362 5055 flags.go:64] FLAG: --client-ca-file=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722366 5055 flags.go:64] FLAG: --cloud-config=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722370 5055 flags.go:64] FLAG: --cloud-provider=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722375 5055 flags.go:64] FLAG: --cluster-dns="[]"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722382 5055 flags.go:64] FLAG: --cluster-domain=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722386 5055 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722391 5055 flags.go:64] FLAG: --config-dir=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722396 5055 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722403 5055 flags.go:64] FLAG: --container-log-max-files="5"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722411 5055 flags.go:64] FLAG: --container-log-max-size="10Mi"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722417 5055 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722422 5055 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722427 5055 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722434 5055 flags.go:64] FLAG: --contention-profiling="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722438 5055 flags.go:64] FLAG: --cpu-cfs-quota="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722443 5055 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722448 5055 flags.go:64] FLAG: --cpu-manager-policy="none"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722453 5055 flags.go:64] FLAG: --cpu-manager-policy-options=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722463 5055 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722467 5055 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722472 5055 flags.go:64] FLAG: --enable-debugging-handlers="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722476 5055 flags.go:64] FLAG: --enable-load-reader="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722481 5055 flags.go:64] FLAG: --enable-server="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722486 5055 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722493 5055 flags.go:64] FLAG: --event-burst="100"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722498 5055 flags.go:64] FLAG: --event-qps="50"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722502 5055 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722507 5055 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722511 5055 flags.go:64] FLAG: --eviction-hard=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722518 5055 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722525 5055 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722530 5055 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722536 5055 flags.go:64] FLAG: --eviction-soft=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722541 5055 flags.go:64] FLAG: --eviction-soft-grace-period=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722546 5055 flags.go:64] FLAG: --exit-on-lock-contention="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722551 5055 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722556 5055 flags.go:64] FLAG: --experimental-mounter-path=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722561 5055 flags.go:64] FLAG: --fail-cgroupv1="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722565 5055 flags.go:64] FLAG: --fail-swap-on="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722570 5055 flags.go:64] FLAG: --feature-gates=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722575 5055 flags.go:64] FLAG: --file-check-frequency="20s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722580 5055 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722585 5055 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722590 5055 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722594 5055 flags.go:64] FLAG: --healthz-port="10248"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722599 5055 flags.go:64] FLAG: --help="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722605 5055 flags.go:64] FLAG: --hostname-override=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722610 5055 flags.go:64] FLAG: --housekeeping-interval="10s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722615 5055 flags.go:64] FLAG: --http-check-frequency="20s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722620 5055 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722626 5055 flags.go:64] FLAG: --image-credential-provider-config=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722632 5055 flags.go:64] FLAG: --image-gc-high-threshold="85"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722637 5055 flags.go:64] FLAG: --image-gc-low-threshold="80"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722642 5055 flags.go:64] FLAG: --image-service-endpoint=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722646 5055 flags.go:64] FLAG: --kernel-memcg-notification="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722650 5055 flags.go:64] FLAG: --kube-api-burst="100"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722654 5055 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722659 5055 flags.go:64] FLAG: --kube-api-qps="50"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722664 5055 flags.go:64] FLAG: --kube-reserved=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722668 5055 flags.go:64] FLAG: --kube-reserved-cgroup=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722672 5055 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722676 5055 flags.go:64] FLAG: --kubelet-cgroups=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722681 5055 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722686 5055 flags.go:64] FLAG: --lock-file=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722690 5055 flags.go:64] FLAG: --log-cadvisor-usage="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722695 5055 flags.go:64] FLAG: --log-flush-frequency="5s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722699 5055 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722706 5055 flags.go:64] FLAG: --log-json-split-stream="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722711 5055 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722716 5055 flags.go:64] FLAG: --log-text-split-stream="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722720 5055 flags.go:64] FLAG: --logging-format="text"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722725 5055 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722730 5055 flags.go:64] FLAG: --make-iptables-util-chains="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722734 5055 flags.go:64] FLAG: --manifest-url=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722739 5055 flags.go:64] FLAG: --manifest-url-header=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722745 5055 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722750 5055 flags.go:64] FLAG: --max-open-files="1000000"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722755 5055 flags.go:64] FLAG: --max-pods="110"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722760 5055 flags.go:64] FLAG: --maximum-dead-containers="-1"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722777 5055 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722781 5055 flags.go:64] FLAG: --memory-manager-policy="None"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722786 5055 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722792 5055 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722797 5055 flags.go:64] FLAG: --node-ip="192.168.126.11"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722804 5055 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722816 5055 flags.go:64] FLAG: --node-status-max-images="50"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722821 5055 flags.go:64] FLAG: --node-status-update-frequency="10s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722826 5055 flags.go:64] FLAG: --oom-score-adj="-999"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722831 5055 flags.go:64] FLAG: --pod-cidr=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722835 5055 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722845 5055 flags.go:64] FLAG: --pod-manifest-path=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722849 5055 flags.go:64] FLAG: --pod-max-pids="-1"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722854 5055 flags.go:64] FLAG: --pods-per-core="0"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722859 5055 flags.go:64] FLAG: --port="10250"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722863 5055 flags.go:64] FLAG: --protect-kernel-defaults="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722868 5055 flags.go:64] FLAG: --provider-id=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722873 5055 flags.go:64] FLAG: --qos-reserved=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722878 5055 flags.go:64] FLAG: --read-only-port="10255"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722883 5055 flags.go:64] FLAG: --register-node="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722887 5055 flags.go:64] FLAG: --register-schedulable="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722892 5055 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722900 5055 flags.go:64] FLAG: --registry-burst="10"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722904 5055 flags.go:64] FLAG: --registry-qps="5"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722909 5055 flags.go:64] FLAG: --reserved-cpus=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722914 5055 flags.go:64] FLAG: --reserved-memory=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722920 5055 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722925 5055 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722929 5055 flags.go:64] FLAG: --rotate-certificates="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722934 5055 flags.go:64] FLAG: --rotate-server-certificates="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722938 5055 flags.go:64] FLAG: --runonce="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722942 5055 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722946 5055 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722951 5055 flags.go:64] FLAG: --seccomp-default="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722955 5055 flags.go:64] FLAG: --serialize-image-pulls="true"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722960 5055 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722965 5055 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722970 5055 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722975 5055 flags.go:64] FLAG: --storage-driver-password="root"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722980 5055 flags.go:64] FLAG: --storage-driver-secure="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722985 5055 flags.go:64] FLAG: --storage-driver-table="stats"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722989 5055 flags.go:64] FLAG: --storage-driver-user="root"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722993 5055 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.722998 5055 flags.go:64] FLAG: --sync-frequency="1m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723003 5055 flags.go:64] FLAG: --system-cgroups=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723008 5055 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723015 5055 flags.go:64] FLAG: --system-reserved-cgroup=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723019 5055 flags.go:64] FLAG: --tls-cert-file=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723024 5055 flags.go:64] FLAG: --tls-cipher-suites="[]"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723030 5055 flags.go:64] FLAG: --tls-min-version=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723036 5055 flags.go:64] FLAG: --tls-private-key-file=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723040 5055 flags.go:64] FLAG: --topology-manager-policy="none"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723044 5055 flags.go:64] FLAG: --topology-manager-policy-options=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723048 5055 flags.go:64] FLAG: --topology-manager-scope="container"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723053 5055 flags.go:64] FLAG: --v="2"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723060 5055 flags.go:64] FLAG: --version="false"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723066 5055 flags.go:64] FLAG: --vmodule=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723072 5055 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723077 5055 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723209 5055 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723216 5055 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723221 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723225 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723229 5055 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723234 5055 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723246 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723250 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723255 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723259 5055 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723263 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723267 5055 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723270 5055 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723274 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723277 5055 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723281 5055 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723285 5055 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723289 5055 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723292 5055 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723297 5055 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723302 5055 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723306 5055 feature_gate.go:330] unrecognized feature gate: Example
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723311 5055 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723315 5055 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723318 5055 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723322 5055 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723326 5055 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723329 5055 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723333 5055 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723337 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723341 5055 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723344 5055 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723348 5055 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723352 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723355 5055 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723359 5055 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723362 5055 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723367 5055 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723375 5055 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723378 5055 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723384 5055 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723388 5055 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723393 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723397 5055 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723401 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723404 5055 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723408 5055 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723412 5055 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723415 5055 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723419 5055 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723423 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723426 5055 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723430 5055 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723434 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723437 5055 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723441 5055 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723445 5055 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723448 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723452 5055 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723456 5055 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723459 5055 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723462 5055 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723467 5055 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723472 5055 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723477 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723481 5055 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723486 5055 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723519 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723524 5055 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723529 5055 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.723535 5055 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.723546 5055 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.735086 5055 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.735127 5055 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735219 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735235 5055 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735241 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735248 5055 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735254 5055 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735260 5055 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735265 5055 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735271 5055 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735276 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735281 5055 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735285 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735290 5055 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735296 5055 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735302 5055 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735310 5055 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735315 5055 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735321 5055 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735327 5055 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735333 5055 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735340 5055 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735347 5055 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735353 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735358 5055 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735364 5055 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735369 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735375 5055 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735380 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735385 5055 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735390 5055 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735395 5055 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735400 5055 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735405 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735410 5055 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735416 5055 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735422 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735428 5055 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735435 5055 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735440 5055 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735445 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735451 5055 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735456 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735460 5055 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735465 5055 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735470 5055 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735477 5055 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735484 5055 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735490 5055 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735496 5055 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735538 5055 feature_gate.go:330] unrecognized feature gate: Example
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735544 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735551 5055 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735556 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735561 5055 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735566 5055 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735571 5055 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735577 5055 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735582 5055 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735587 5055 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735591 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735597 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735602 5055 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735606 5055 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735611 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735616 5055 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735621 5055 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735626 5055 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735630 5055 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735636 5055 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735641 5055 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735646 5055 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735652 5055 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.735661 5055 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735871 5055 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735883 5055 feature_gate.go:330] unrecognized feature gate: Example
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735889 5055 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735894 5055 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735901 5055 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
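[Editor's sketch, not part of the captured log.] The repeated "unrecognized feature gate" warnings are plausibly OpenShift-defined gates passed through to the kubelet, which keeps only the gates its own registry knows; the effective map logged at feature_gate.go:386 reflects that filtering. Expressed in the config file, the recognized overrides from the logged map would look roughly like this hypothetical featureGates stanza (the false entries in the map are likewise explicit overrides):

    # Hypothetical stanza mirroring the feature-gate map logged above.
    featureGates:
      CloudDualStackNodeIPs: true
      DisableKubeletCloudCredentialProviders: true
      KMSv1: true
      ValidatingAdmissionPolicy: true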
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735907 5055 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735915 5055 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735920 5055 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735926 5055 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735931 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735937 5055 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735943 5055 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735948 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735954 5055 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735960 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735966 5055 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735971 5055 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735976 5055 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735982 5055 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735987 5055 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735992 5055 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.735997 5055 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736002 5055 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736007 5055 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736012 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736017 5055 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736022 5055 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736027 5055 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736032 5055 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736037 5055 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736041 5055 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736046 5055 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736051 5055 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736056 5055 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736062 5055 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736067 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736072 5055 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736077 5055 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736082 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736087 5055 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736092 5055 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736097 5055 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736101 5055 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736108 5055 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736114 5055 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736119 5055 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736124 5055 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736129 5055 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736134 5055 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736139 5055 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736144 5055 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736150 5055 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736156 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736162 5055 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736167 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736173 5055 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736178 5055 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736184 5055 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736189 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736194 5055 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736199 5055 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736204 5055 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736209 5055 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736215 5055 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736221 5055 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736226 5055 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736231 5055 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736236 5055 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736241 5055 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736246 5055 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.736252 5055 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.736261 5055 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.736449 5055 server.go:940] "Client rotation is on, will bootstrap in background"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.741834 5055 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.741944 5055 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.743400 5055 server.go:997] "Starting client certificate rotation"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.743433 5055 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.743593 5055 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-24 11:19:46.686925983 +0000 UTC
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.743672 5055 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1060h26m9.94325707s for next certificate rotation
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.779215 5055 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.781720 5055 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.800025 5055 log.go:25] "Validated CRI v1 runtime API"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.846924 5055 log.go:25] "Validated CRI v1 image API"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.848824 5055 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.856442 5055 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-10-11-06-44-40-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.856475 5055 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.875238 5055 manager.go:217] Machine: {Timestamp:2025-10-11 06:53:36.872319343 +0000 UTC m=+0.646593160 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:db761453-8050-423f-b90e-e93814339b53 BootID:d49ae713-4c08-4a0c-a29b-99e24a2285c9 Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:d4:da:c1 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:d4:da:c1 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:b8:11:af Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:6b:61:77 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:5d:75:d6 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:3a:61:30 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:a3:ba:88 Speed:-1 Mtu:1496} {Name:ens7.44 MacAddress:52:54:00:9c:fa:d2 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:92:40:3a:04:e2:dc Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:8e:44:2a:5f:88:e0 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID:
DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.875700 5055 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.875935 5055 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.880152 5055 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.880526 5055 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.880622 5055 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.880929 5055 topology_manager.go:138] "Creating topology manager with none policy" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.880986 5055 container_manager_linux.go:303] "Creating device plugin manager" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.881572 5055 manager.go:142] "Creating Device Plugin manager" 
path="/var/lib/kubelet/device-plugins/kubelet.sock" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.881648 5055 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.881940 5055 state_mem.go:36] "Initialized new in-memory state store" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.882074 5055 server.go:1245] "Using root directory" path="/var/lib/kubelet" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.886035 5055 kubelet.go:418] "Attempting to sync node with API server" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.886119 5055 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.886238 5055 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.886310 5055 kubelet.go:324] "Adding apiserver pod source" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.886374 5055 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.892817 5055 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.892943 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.893043 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.893242 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.893359 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.894812 5055 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.897119 5055 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899809 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899842 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899853 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899864 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899905 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899915 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899925 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899940 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899952 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899962 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899988 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.899998 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.903425 5055 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.904066 5055 server.go:1280] "Started kubelet" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.904816 5055 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.904822 5055 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.905535 5055 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:36 crc systemd[1]: Started Kubernetes Kubelet. 
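From here on, nearly every E-level line in this boot is the same failure: the client-go reflectors, the node-lease controller, the event recorder, and the CSINode wait all report `dial tcp 38.102.83.162:6443: connect: connection refused`. The kubelet has come up before kube-apiserver on this single-node (CRC) cluster, and each component retries until the API server starts answering; "crc" not being found in the node lister is the same symptom. A standalone Go probe for triaging that state, separating DNS resolution from TCP reachability (host and port taken from the log lines above; this is a diagnostic sketch, not kubelet code):

package main

import (
	"fmt"
	"net"
	"time"
)

// Reproduce the kubelet's failing connection attempt in isolation: first
// confirm api-int.crc.testing resolves (in the log it does, to
// 38.102.83.162), then attempt the TCP dial. "Connection refused" with a
// good resolution means nothing is listening on 6443 yet, which is normal
// this early in a single-node boot.
func main() {
	const host = "api-int.crc.testing"
	addrs, err := net.LookupHost(host)
	if err != nil {
		fmt.Printf("DNS problem: %v\n", err)
		return
	}
	fmt.Printf("%s resolves to %v\n", host, addrs)
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, "6443"), 2*time.Second)
	if err != nil {
		fmt.Printf("dial failed (kube-apiserver not accepting connections yet): %v\n", err)
		return
	}
	conn.Close()
	fmt.Println("kube-apiserver port 6443 is accepting connections")
}

If the probe keeps failing long after the static kube-apiserver pod should be up, the next place to look is /etc/kubernetes/manifests (watched by the kubelet per the "Adding static pod path" line above) and the CRI-O container logs for the apiserver.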
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.906088 5055 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.908888 5055 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.909152 5055 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.909474 5055 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 20:21:30.472487438 +0000 UTC Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.914592 5055 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 685h27m53.557910308s for next certificate rotation Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.914442 5055 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.914263 5055 server.go:460] "Adding debug handlers to kubelet server" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.917124 5055 volume_manager.go:287] "The desired_state_of_world populator starts" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.917202 5055 volume_manager.go:289] "Starting Kubelet Volume Manager" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.917448 5055 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.918249 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.162:6443: connect: connection refused" interval="200ms" Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.923261 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.923424 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924335 5055 factory.go:153] Registering CRI-O factory Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924391 5055 factory.go:221] Registration of the crio container factory successfully Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924456 5055 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924464 5055 factory.go:55] Registering systemd factory Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924470 5055 factory.go:221] Registration of the systemd container factory successfully Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924491 5055 factory.go:103] 
Registering Raw factory Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.924505 5055 manager.go:1196] Started watching for new ooms in manager Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.925084 5055 manager.go:319] Starting recovery of all containers Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.923670 5055 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.162:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186d5d4d856ba7a4 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-11 06:53:36.904021924 +0000 UTC m=+0.678295741,LastTimestamp:2025-10-11 06:53:36.904021924 +0000 UTC m=+0.678295741,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.934828 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935015 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935082 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935147 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935206 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935269 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935326 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935389 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935454 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935520 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935579 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935659 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935723 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935802 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935872 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935931 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.935992 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936050 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936109 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936172 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936230 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936288 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936381 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936447 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936508 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936568 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936631 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936692 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936750 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936853 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.936952 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937047 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937117 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937177 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937236 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937298 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937378 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937464 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937526 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937588 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937654 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937713 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937851 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937914 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.937977 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938035 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938094 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938150 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938207 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938264 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938322 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938384 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938454 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938533 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938616 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938686 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938750 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938834 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938896 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.938958 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939018 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939078 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939138 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939195 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939254 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939313 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939374 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939433 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939491 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939548 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939601 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939681 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939756 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939835 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939893 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.939950 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940007 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940065 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940124 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940180 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940240 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940297 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940355 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940475 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940531 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940588 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940643 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940696 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940755 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940833 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940889 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.940945 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941005 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941064 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941135 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941193 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941261 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941321 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941377 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941439 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941498 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941555 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941658 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941717 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941804 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941875 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941933 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.941994 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942055 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942118 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942178 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942239 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942304 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942364 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942424 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942483 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942540 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942598 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942657 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942715 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942793 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942861 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942923 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.942981 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943046 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943112 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943171 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943229 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943289 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943349 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943419 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943481 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943543 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943600 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943700 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943897 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.943970 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944035 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944100 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944158 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944217 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944299 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944383 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944445 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944505 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944560 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944618 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944679 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944739 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944835 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944914 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.944993 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945067 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945130 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945190 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945256 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945317 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945384 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945441 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945506 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945568 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945628 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945689 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945749 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945831 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945893 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945950 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946011 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.945932 5055 manager.go:324] Recovery completed Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946078 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946216 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946289 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946327 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946351 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946375 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946398 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946423 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946448 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946470 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946494 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946514 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946533 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946555 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946590 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946612 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946637 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946659 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946680 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946701 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.946723 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950096 5055 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950173 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950206 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950229 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950254 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 
06:53:36.950278 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950301 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950329 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950368 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950397 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950430 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950460 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950490 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950522 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950552 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950577 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 
06:53:36.950608 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950667 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950697 5055 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950724 5055 reconstruct.go:97] "Volume reconstruction finished"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.950742 5055 reconciler.go:26] "Reconciler: start to sync state"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.958063 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.959743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.959904 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.959988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.960850 5055 cpu_manager.go:225] "Starting CPU manager" policy="none"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.960899 5055 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.960922 5055 state_mem.go:36] "Initialized new in-memory state store"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.985531 5055 policy_none.go:49] "None policy: Start"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.988318 5055 memory_manager.go:170] "Starting memorymanager" policy="None"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.988356 5055 state_mem.go:35] "Initializing new in-memory state store"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.990177 5055 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.992137 5055 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.992187 5055 status_manager.go:217] "Starting to sync pod status with apiserver"
Oct 11 06:53:36 crc kubenswrapper[5055]: I1011 06:53:36.992218 5055 kubelet.go:2335] "Starting kubelet main sync loop"
Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.992364 5055 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Oct 11 06:53:36 crc kubenswrapper[5055]: W1011 06:53:36.994386 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:36 crc kubenswrapper[5055]: E1011 06:53:36.994449 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError"
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.014881 5055 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.040009 5055 manager.go:334] "Starting Device Plugin manager"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.040065 5055 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.040080 5055 server.go:79] "Starting device plugin registration server"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.040614 5055 eviction_manager.go:189] "Eviction manager: starting control loop"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.040639 5055 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.041301 5055 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.041393 5055 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.041403 5055 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.049834 5055 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.092700 5055 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.092863 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.094603 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.094641 5055
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.094654 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.094825 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095042 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095071 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095866 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095901 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095921 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095929 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095904 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.095994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.096097 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.096331 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.096420 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097705 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097857 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097890 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.097907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.098031 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.098135 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.098164 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099223 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099241 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099250 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099249 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099341 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099386 5055 util.go:30] "No sandbox for pod can be found. 
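"No sandbox for pod can be found. Need to start a new one" means the kubelet will ask the container runtime (CRI-O on this node) for a fresh pod sandbox over the CRI gRPC API. A stripped-down sketch of that call using k8s.io/cri-api; the real request the kubelet builds also carries cgroup, DNS, port-mapping, and security-context settings:

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        runtimev1 "k8s.io/cri-api/pkg/apis/runtime/v1"
    )

    func main() {
        // CRI-O's socket on an OpenShift node; adjust for other runtimes.
        conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
        rt := runtimev1.NewRuntimeServiceClient(conn)

        // Minimal sandbox request, using the etcd static pod's UID from the log.
        resp, err := rt.RunPodSandbox(context.Background(), &runtimev1.RunPodSandboxRequest{
            Config: &runtimev1.PodSandboxConfig{
                Metadata: &runtimev1.PodSandboxMetadata{
                    Name:      "etcd-crc",
                    Namespace: "openshift-etcd",
                    Uid:       "2139d3e2895fc6797b9c76a1b4c9886d",
                    Attempt:   0,
                },
            },
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("sandbox:", resp.PodSandboxId)
    }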
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099431 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.099344 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100286 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100316 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100328 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100472 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100505 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100521 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100694 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.100725 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.102020 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.102327 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.102342 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.119001 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.162:6443: connect: connection refused" interval="400ms" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.140746 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.142339 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.142383 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.142398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.142457 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.143058 5055 
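The lease error above shows the node-heartbeat side of startup: the kubelet ensures a coordination.k8s.io Lease named after the node in kube-node-lease, and keeps retrying (interval="400ms" here) while api-int.crc.testing:6443 refuses connections. A client-go sketch of the same Get; the kubeconfig path is an assumption:

    package main

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumed kubeconfig location for a kubelet-style client on this node.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
        if err != nil {
            panic(err)
        }
        cfg.Timeout = 10 * time.Second // matches the ?timeout=10s in the log
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }

        // Poll like the lease controller does until the API server answers.
        for {
            lease, err := cs.CoordinationV1().Leases("kube-node-lease").
                Get(context.Background(), "crc", metav1.GetOptions{})
            if err == nil {
                fmt.Println("lease renewed at", lease.Spec.RenewTime)
                return
            }
            fmt.Println("will retry:", err)
            time.Sleep(400 * time.Millisecond) // the interval="400ms" above
        }
    }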
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.162:6443: connect: connection refused" node="crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.153806 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.153919 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.153958 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.153985 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154003 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154028 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154081 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154101 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154119 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 
06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154143 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154167 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154196 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154220 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154246 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.154265 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256013 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256077 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256113 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256142 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256173 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256203 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256232 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256262 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256291 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256323 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256351 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256381 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256408 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256436 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256465 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256430 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256683 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256804 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256853 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256930 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256985 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256999 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257027 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: 
\"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257043 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257055 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257053 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257064 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257082 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.256929 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.257191 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.344253 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.346561 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.346621 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.346640 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.352296 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.426318 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.442599 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.462987 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.477339 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.482611 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: W1011 06:53:37.484602 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-b5b12826068c167c78387116ee3d9fb8d565bcafc19b896e501a691c1cf610cd WatchSource:0}: Error finding container b5b12826068c167c78387116ee3d9fb8d565bcafc19b896e501a691c1cf610cd: Status 404 returned error can't find the container with id b5b12826068c167c78387116ee3d9fb8d565bcafc19b896e501a691c1cf610cd
Oct 11 06:53:37 crc kubenswrapper[5055]: W1011 06:53:37.488613 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b4ad2f570e49e0db408f61a833b26864681a63e2146b2a1698c71f7705315c80 WatchSource:0}: Error finding container b4ad2f570e49e0db408f61a833b26864681a63e2146b2a1698c71f7705315c80: Status 404 returned error can't find the container with id b4ad2f570e49e0db408f61a833b26864681a63e2146b2a1698c71f7705315c80
Oct 11 06:53:37 crc kubenswrapper[5055]: W1011 06:53:37.502073 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-079ca14c57879adfdff15c00ee155daf2b0f89e37c07901fd8e4138ec10d237c WatchSource:0}: Error finding container 079ca14c57879adfdff15c00ee155daf2b0f89e37c07901fd8e4138ec10d237c: Status 404 returned error can't find the container with id 079ca14c57879adfdff15c00ee155daf2b0f89e37c07901fd8e4138ec10d237c
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.520493 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.162:6443: connect: connection refused" interval="800ms"
Oct 11 06:53:37 crc kubenswrapper[5055]: W1011 06:53:37.708577 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.708707 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.754930 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.756231 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.756272 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.756282 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.756307 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.756879 5055 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.162:6443: connect: connection refused" node="crc"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.906932 5055 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:37 crc kubenswrapper[5055]: W1011 06:53:37.973295 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:37 crc kubenswrapper[5055]: E1011 06:53:37.973411 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError"
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.995633 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b5b12826068c167c78387116ee3d9fb8d565bcafc19b896e501a691c1cf610cd"}
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.996566 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b4ad2f570e49e0db408f61a833b26864681a63e2146b2a1698c71f7705315c80"}
Oct 11 06:53:37 crc kubenswrapper[5055]: I1011 06:53:37.998607 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"511878d0d763ce7b09a1e890fd2f77c129796ee3f48da22af80a7f28eac28c94"}
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.000368 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e797eb21e52b05ec3c9ad0cf06485aecc6bfb85df8777d10f09022b5f7d8f170"}
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.002698 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"079ca14c57879adfdff15c00ee155daf2b0f89e37c07901fd8e4138ec10d237c"}
Oct 11 06:53:38 crc kubenswrapper[5055]: W1011 06:53:38.052358 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:38 crc kubenswrapper[5055]: E1011 06:53:38.052430 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError"
Oct 11 06:53:38 crc kubenswrapper[5055]: W1011 06:53:38.140309 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Oct 11 06:53:38 crc kubenswrapper[5055]: E1011 06:53:38.140663 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError"
Oct 11 06:53:38 crc kubenswrapper[5055]: E1011 06:53:38.321364 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.162:6443: connect: connection refused" interval="1.6s"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.557472 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.558715 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.558749 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.558777 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.558805 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Oct 11 06:53:38 crc kubenswrapper[5055]: E1011 06:53:38.559284 5055 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.162:6443: connect: connection refused" node="crc"
Oct 11 06:53:38 crc kubenswrapper[5055]: I1011 06:53:38.906424 5055 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused
Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.006553 5055 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4" exitCode=0 Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.006632 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.006736 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.007615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.007667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.007681 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.009755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.009799 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.009802 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.009893 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.009905 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010452 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010477 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010484 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010727 5055 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c" exitCode=0 Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010798 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.010801 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011508 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011544 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011556 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011852 5055 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63" exitCode=0 Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011903 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.011907 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.012513 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.012532 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.012541 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.012600 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013322 5055 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de" exitCode=0 Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013357 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de"} Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013390 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013409 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013424 5055 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.013453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.014149 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.014175 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.014188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:39 crc kubenswrapper[5055]: W1011 06:53:39.604206 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:39 crc kubenswrapper[5055]: E1011 06:53:39.604310 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:39 crc kubenswrapper[5055]: I1011 06:53:39.906740 5055 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:39 crc kubenswrapper[5055]: E1011 06:53:39.922749 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.162:6443: connect: connection refused" interval="3.2s" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.016559 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75"} Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.018357 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c"} Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.020522 5055 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38" exitCode=0 Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.020560 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38"} Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.020613 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.021437 5055 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.021471 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.021485 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.023214 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08"} Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.023236 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.023236 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024233 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024256 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024267 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024378 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.024392 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:40 crc kubenswrapper[5055]: W1011 06:53:40.065391 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:40 crc kubenswrapper[5055]: E1011 06:53:40.065520 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.159837 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.161053 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.161081 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.161090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 
06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.161111 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 06:53:40 crc kubenswrapper[5055]: E1011 06:53:40.161546 5055 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.162:6443: connect: connection refused" node="crc" Oct 11 06:53:40 crc kubenswrapper[5055]: W1011 06:53:40.686371 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:40 crc kubenswrapper[5055]: E1011 06:53:40.686437 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:40 crc kubenswrapper[5055]: I1011 06:53:40.906663 5055 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:40 crc kubenswrapper[5055]: W1011 06:53:40.953291 5055 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.162:6443: connect: connection refused Oct 11 06:53:40 crc kubenswrapper[5055]: E1011 06:53:40.953381 5055 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.162:6443: connect: connection refused" logger="UnhandledError" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.028967 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.029251 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.029021 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.030631 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.030751 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.030859 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.031866 
5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.031861 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.031986 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.032010 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.032023 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.032756 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.032799 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.032808 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.034925 5055 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5" exitCode=0 Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035015 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035145 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5"} Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035262 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035894 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035915 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035923 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.035999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.036021 5055 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:41 crc kubenswrapper[5055]: I1011 06:53:41.036033 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041063 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192"} Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041119 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f"} Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041132 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354"} Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041143 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea"} Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9"} Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041191 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041224 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041190 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041290 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.041310 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042354 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042365 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042340 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042389 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.042484 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.741513 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.741698 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.742834 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.742863 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:42 crc kubenswrapper[5055]: I1011 06:53:42.742875 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.042526 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.042599 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043631 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043658 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043666 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043850 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043870 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.043878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.362173 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.363395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.363433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.363443 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.363469 5055 
kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 06:53:43 crc kubenswrapper[5055]: I1011 06:53:43.985129 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.045917 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.047335 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.047379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.047390 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.478434 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.478704 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.480196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.480251 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.480272 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.758322 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.758574 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.760183 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.760264 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:44 crc kubenswrapper[5055]: I1011 06:53:44.760289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.653614 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.653797 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.655281 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.655306 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.655314 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:53:45 crc 
Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.923582 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.928576 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.928647 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:45 crc kubenswrapper[5055]: I1011 06:53:45.929615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.149854 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.150028 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.152026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.152108 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.152121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.575268 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:46 crc kubenswrapper[5055]: I1011 06:53:46.593753 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:47 crc kubenswrapper[5055]: E1011 06:53:47.050067 5055 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Oct 11 06:53:47 crc kubenswrapper[5055]: I1011 06:53:47.053368 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:47 crc kubenswrapper[5055]: I1011 06:53:47.054193 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:47 crc kubenswrapper[5055]: I1011 06:53:47.054226 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:47 crc kubenswrapper[5055]: I1011 06:53:47.054253 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:48 crc kubenswrapper[5055]: I1011 06:53:48.055280 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:48 crc kubenswrapper[5055]: I1011 06:53:48.056213 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:48 crc kubenswrapper[5055]: I1011 06:53:48.056241 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:48 crc kubenswrapper[5055]: I1011 06:53:48.056249 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:48 crc kubenswrapper[5055]: I1011 06:53:48.061382 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.058253 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.060069 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.060113 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.060125 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.150038 5055 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.150152 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.861600 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.861888 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.863116 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.863148 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:49 crc kubenswrapper[5055]: I1011 06:53:49.863160 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:51 crc kubenswrapper[5055]: I1011 06:53:51.023676 5055 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Oct 11 06:53:51 crc kubenswrapper[5055]: I1011 06:53:51.024273 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 11 06:53:51 crc kubenswrapper[5055]: I1011 06:53:51.029350 5055 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Oct 11 06:53:51 crc kubenswrapper[5055]: I1011 06:53:51.029505 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.927395 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.928139 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.929138 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.929246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.929347 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:53:55 crc kubenswrapper[5055]: I1011 06:53:55.934128 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.023589 5055 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.024469 5055 trace.go:236] Trace[390777949]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 06:53:45.755) (total time: 10268ms):
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[390777949]: ---"Objects listed" error: 10268ms (06:53:56.024)
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[390777949]: [10.268669311s] [10.268669311s] END
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.024486 5055 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.026285 5055 trace.go:236] Trace[1984993188]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 06:53:45.320) (total time: 10706ms):
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[1984993188]: ---"Objects listed" error: 10705ms (06:53:56.026)
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[1984993188]: [10.706100507s] [10.706100507s] END
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.026479 5055 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.026343 5055 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.026314 5055 trace.go:236] Trace[116650980]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 06:53:45.709) (total time: 10316ms):
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[116650980]: ---"Objects listed" error: 10316ms (06:53:56.026)
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[116650980]: [10.316781317s] [10.316781317s] END
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.026606 5055 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.027668 5055 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.028222 5055 trace.go:236] Trace[587381719]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (11-Oct-2025 06:53:43.542) (total time: 12485ms):
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[587381719]: ---"Objects listed" error: 12485ms (06:53:56.028)
Oct 11 06:53:56 crc kubenswrapper[5055]: Trace[587381719]: [12.485783232s] [12.485783232s] END
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.028240 5055 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.072687 5055 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33792->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.072689 5055 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33798->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.072755 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33792->192.168.126.11:17697: read: connection reset by peer"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.072827 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33798->192.168.126.11:17697: read: connection reset by peer"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.076002 5055 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.076088 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.153787 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.157517 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.896827 5055 apiserver.go:52] "Watching apiserver"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.903319 5055 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.903793 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-multus/multus-4lplf","openshift-multus/multus-additional-cni-plugins-rbwct","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-dns/node-resolver-vmbd2","openshift-kube-apiserver/kube-apiserver-crc","openshift-machine-config-operator/machine-config-daemon-qtqvf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-target-xd92c"]
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.904104 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.904188 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.904231 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.904485 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.904651 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.904687 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.904706 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.905083 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.905158 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-4lplf"
Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.905278 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.905366 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-vmbd2"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.905379 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rbwct"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.905447 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914156 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914337 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914380 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914458 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914703 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914732 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914943 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914968 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914989 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.914987 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915025 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Oct 11
06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915072 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915082 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915121 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915096 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915193 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915207 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915292 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915398 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915452 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915403 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.915536 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.916006 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.917941 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.923575 5055 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933133 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933173 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933193 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933213 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933233 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933256 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933277 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933300 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933321 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933345 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933367 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933382 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933399 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933415 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933429 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933444 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933460 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933477 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933495 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933512 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933562 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933583 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933599 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933614 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933630 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933649 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933664 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933681 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933696 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933719 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933781 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933820 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933839 5055 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933865 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933887 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933939 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933959 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933979 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.933990 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934002 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934057 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934077 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934120 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934140 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934137 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934157 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934165 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934156 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934335 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934344 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934366 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934387 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934395 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934419 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934416 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934437 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934456 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934480 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934500 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934518 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934536 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934552 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934559 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934568 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934674 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934683 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934687 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934683 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934756 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934795 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934802 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934830 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934852 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934859 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934886 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934939 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934964 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.934990 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935012 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935037 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935058 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935078 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935099 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935120 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935142 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935164 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935185 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935205 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935226 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935248 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935268 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935294 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935316 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935313 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935338 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935406 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935428 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935427 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935447 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935456 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935464 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935482 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935501 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935518 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935533 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935543 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935550 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935586 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935611 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935632 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935656 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935678 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935699 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935724 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935730 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935747 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935800 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935821 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935845 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935868 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935891 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935912 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935924 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935934 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.935979 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936010 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936044 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936063 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936080 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936098 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936117 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936140 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936142 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: 
"bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936159 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936177 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936181 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936573 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936599 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936617 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936639 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936660 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936678 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936696 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936713 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936732 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936749 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936782 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936805 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936826 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936845 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936880 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936898 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936999 5055 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.936914 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937264 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937282 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937297 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937313 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937332 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937348 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937363 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937379 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937396 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937414 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937431 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937449 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937466 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937481 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937497 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937513 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937528 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937543 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937559 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937576 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937594 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937610 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937624 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937640 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937656 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937672 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937688 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937703 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" 
(UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937724 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937741 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937756 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937799 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937816 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937836 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937852 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937868 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937884 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 
06:53:56.937901 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937916 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937946 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937962 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937980 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.937997 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938013 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938029 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938046 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938062 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod 
\"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938079 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938097 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938113 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938128 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938145 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938160 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938176 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938192 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938318 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938336 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938352 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938367 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938383 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938401 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938403 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938419 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938479 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938510 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938611 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938715 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-etc-kubernetes\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938755 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938798 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-netns\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938821 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-os-release\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938840 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/99289791-dfca-4a6e-81a1-792954f034af-hosts-file\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938855 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938870 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938950 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938966 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.938863 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939128 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939158 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939186 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939222 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-k8s-cni-cncf-io\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939254 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/46789346-5a88-43a0-ad63-b530185c8ca1-mcd-auth-proxy-config\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939273 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fthjw\" (UniqueName: \"kubernetes.io/projected/14178151-9e5c-49bd-941f-5178607a5ad4-kube-api-access-fthjw\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939297 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cnibin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939314 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-binary-copy\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939330 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939348 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-system-cni-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939364 5055 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-bin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939379 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-daemon-config\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939392 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cni-binary-copy\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939407 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939463 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939483 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-system-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939503 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939521 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-conf-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939547 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:56 crc 
kubenswrapper[5055]: I1011 06:53:56.939568 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939584 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/46789346-5a88-43a0-ad63-b530185c8ca1-proxy-tls\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939602 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-hostroot\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939608 5055 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939618 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-multus\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939637 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939655 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939671 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-socket-dir-parent\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939692 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szpfm\" (UniqueName: \"kubernetes.io/projected/99289791-dfca-4a6e-81a1-792954f034af-kube-api-access-szpfm\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:56 crc 
kubenswrapper[5055]: I1011 06:53:56.939714 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939732 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-kubelet\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939750 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939785 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939802 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-multus-certs\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939817 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/46789346-5a88-43a0-ad63-b530185c8ca1-rootfs\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939835 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939850 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-cnibin\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939870 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod 
\"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939884 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-os-release\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939901 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vwh6\" (UniqueName: \"kubernetes.io/projected/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-kube-api-access-8vwh6\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939916 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shprt\" (UniqueName: \"kubernetes.io/projected/46789346-5a88-43a0-ad63-b530185c8ca1-kube-api-access-shprt\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940003 5055 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940015 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940039 5055 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940049 5055 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940059 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940069 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940080 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940090 5055 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 
06:53:56.940100 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940109 5055 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940120 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940130 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940140 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940150 5055 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940160 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940169 5055 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939225 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.939305 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939466 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.939632 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940114 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940269 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940415 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940598 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.940616 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:57.440578847 +0000 UTC m=+21.214852654 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940711 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.940750 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.941989 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.942456 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.942715 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.942948 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.943242 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947017 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.947140 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.947201 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:57.447185955 +0000 UTC m=+21.221459762 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947244 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947495 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947624 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947636 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947681 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.947979 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.948186 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.948255 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.948411 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.949668 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.949799 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.949802 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950955 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950114 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950598 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950352 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952537 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952558 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952575 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952592 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952606 5055 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952619 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952633 5055 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952646 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952662 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952676 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc 
kubenswrapper[5055]: I1011 06:53:56.952691 5055 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952705 5055 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.952719 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953269 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953478 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953577 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953630 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953729 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950822 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.950899 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.951128 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.951137 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.953947 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.954382 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.954753 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.954468 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.954933 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.955266 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.956662 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957045 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957223 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957400 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957482 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957575 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957757 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.957981 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.958618 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.958992 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.959095 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.959729 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.959754 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.959835 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.960058 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.960492 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.961474 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.961543 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.961792 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.961870 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.962078 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.962295 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.963737 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.963979 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964030 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.964146 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:53:57.464118856 +0000 UTC m=+21.238392663 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964388 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964450 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964465 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964513 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964542 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964759 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964815 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.964933 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.965248 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.965644 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). 
InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.965677 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.965804 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966005 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966011 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966030 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966147 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966188 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966273 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966420 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966617 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966693 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966713 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966741 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.966689 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.967549 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968028 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968042 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968446 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968434 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968467 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968488 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968594 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968785 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968858 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969523 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969656 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969812 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969696 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.968081 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969907 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969931 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969997 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969695 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.970036 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.970142 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.970299 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.970351 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.970429 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.980149 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.980283 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.969200 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.980751 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.980757 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981059 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981322 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981486 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981546 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981579 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981615 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.981953 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.982167 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.982529 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.982683 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.982897 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.983698 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.983726 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.983982 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.984052 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.984965 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985036 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985058 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985078 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985097 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985831 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.985905 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.989558 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.989595 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.989597 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.989957 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.990712 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.990842 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.980310 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.990675 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.991116 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.991140 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.991153 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.991162 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.991498 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:57.491220776 +0000 UTC m=+21.265494583 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.991824 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.992018 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.992043 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.992056 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.992225 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: E1011 06:53:56.992292 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:57.492254945 +0000 UTC m=+21.266528752 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.992474 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.993004 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.993050 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.993451 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.993563 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.993833 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.994093 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.994154 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.994482 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.994525 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:56 crc kubenswrapper[5055]: I1011 06:53:56.994545 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:56.999974 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.001240 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.002495 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.003787 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.010243 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.011244 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.012586 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.013242 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.014543 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.015237 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.015853 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.017183 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.024312 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.025549 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.032781 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.036013 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.036900 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.037433 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.038204 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.038846 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.039163 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.039692 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.040332 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.042114 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.045890 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.050657 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.051395 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.052240 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053205 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-k8s-cni-cncf-io\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053243 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053264 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/46789346-5a88-43a0-ad63-b530185c8ca1-mcd-auth-proxy-config\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053279 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cnibin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053298 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-binary-copy\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053313 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053328 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fthjw\" (UniqueName: \"kubernetes.io/projected/14178151-9e5c-49bd-941f-5178607a5ad4-kube-api-access-fthjw\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053344 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" 
(UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-system-cni-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053360 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-bin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053381 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-daemon-config\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053404 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cni-binary-copy\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053422 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053437 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-system-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053451 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-conf-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053498 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-hostroot\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053505 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-k8s-cni-cncf-io\") pod \"multus-4lplf\" (UID: 
\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053514 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/46789346-5a88-43a0-ad63-b530185c8ca1-proxy-tls\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053584 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053602 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-socket-dir-parent\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053622 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-multus\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053653 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-kubelet\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053654 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-hostroot\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053670 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szpfm\" (UniqueName: \"kubernetes.io/projected/99289791-dfca-4a6e-81a1-792954f034af-kube-api-access-szpfm\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053783 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-multus-certs\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053815 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/46789346-5a88-43a0-ad63-b530185c8ca1-rootfs\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: 
I1011 06:53:57.053839 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-os-release\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053861 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vwh6\" (UniqueName: \"kubernetes.io/projected/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-kube-api-access-8vwh6\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053885 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shprt\" (UniqueName: \"kubernetes.io/projected/46789346-5a88-43a0-ad63-b530185c8ca1-kube-api-access-shprt\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053907 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-cnibin\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053917 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053936 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-etc-kubernetes\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053958 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-multus\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053968 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-etc-kubernetes\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053984 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-netns\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.053836 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054041 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-system-cni-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054044 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-cnibin\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054089 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cnibin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054113 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-kubelet\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054152 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-netns\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054411 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054508 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-socket-dir-parent\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054566 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-os-release\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054590 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-conf-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 
crc kubenswrapper[5055]: I1011 06:53:57.054629 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-var-lib-cni-bin\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054642 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-cni-binary-copy\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054672 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054759 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-system-cni-dir\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054810 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-host-run-multus-certs\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054807 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/14178151-9e5c-49bd-941f-5178607a5ad4-cni-binary-copy\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054889 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-os-release\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054906 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/99289791-dfca-4a6e-81a1-792954f034af-hosts-file\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.054981 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/46789346-5a88-43a0-ad63-b530185c8ca1-mcd-auth-proxy-config\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055017 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-tuning-conf-dir\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055100 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/14178151-9e5c-49bd-941f-5178607a5ad4-os-release\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055115 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/99289791-dfca-4a6e-81a1-792954f034af-hosts-file\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055133 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055127 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/46789346-5a88-43a0-ad63-b530185c8ca1-rootfs\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055329 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055342 5055 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055353 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055363 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055372 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055381 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055390 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath 
\"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055399 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055407 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055415 5055 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055423 5055 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055436 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055445 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055454 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055462 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055470 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055480 5055 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055504 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055512 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055521 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc 
kubenswrapper[5055]: I1011 06:53:57.055529 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055538 5055 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055547 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055560 5055 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055527 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055633 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-multus-daemon-config\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055569 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055661 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055670 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055679 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: 
I1011 06:53:57.055688 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055696 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055705 5055 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055714 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055722 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055730 5055 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055740 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055748 5055 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055757 5055 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055798 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055807 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055815 5055 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055826 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055834 5055 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055844 5055 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055857 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055865 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055873 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055882 5055 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055889 5055 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055897 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055905 5055 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055914 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055923 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055931 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055939 5055 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055949 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055958 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055967 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055975 5055 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055983 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.055992 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056001 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056009 5055 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056017 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056026 5055 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056034 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056044 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056053 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056062 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056070 5055 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056079 5055 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056088 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056096 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056105 5055 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056113 5055 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056121 5055 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056130 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056140 5055 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056147 5055 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056155 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056163 5055 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 
06:53:57.056172 5055 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056182 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056190 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056198 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056207 5055 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056214 5055 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056222 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056231 5055 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056238 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056246 5055 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056254 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056262 5055 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056269 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056277 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056285 5055 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056298 5055 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056307 5055 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056316 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056324 5055 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056332 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056341 5055 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056350 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056358 5055 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056366 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056400 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056408 5055 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056417 5055 
reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056425 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056435 5055 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056444 5055 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056453 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056462 5055 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056470 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056479 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056488 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056497 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056505 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056514 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056522 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Oct 11 
06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056531 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056540 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056547 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056555 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056564 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056573 5055 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056581 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056590 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056599 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056608 5055 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056616 5055 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056624 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056632 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 
06:53:57.056641 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056653 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056662 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056670 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056678 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056686 5055 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056695 5055 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056703 5055 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056712 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056721 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056729 5055 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056737 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.056744 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc 
kubenswrapper[5055]: I1011 06:53:57.056752 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064388 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/46789346-5a88-43a0-ad63-b530185c8ca1-proxy-tls\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064790 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064846 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064859 5055 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064870 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064881 5055 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064889 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064901 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064912 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064922 5055 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064930 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064940 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064950 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064960 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064971 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064979 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.064987 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.065560 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.065981 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.067363 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.068221 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.068287 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.072009 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.072846 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.073324 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.074312 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shprt\" (UniqueName: \"kubernetes.io/projected/46789346-5a88-43a0-ad63-b530185c8ca1-kube-api-access-shprt\") pod \"machine-config-daemon-qtqvf\" (UID: \"46789346-5a88-43a0-ad63-b530185c8ca1\") " pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.074864 5055 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.074961 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.076644 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.078783 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.083080 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.084580 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.086271 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fthjw\" (UniqueName: \"kubernetes.io/projected/14178151-9e5c-49bd-941f-5178607a5ad4-kube-api-access-fthjw\") pod \"multus-additional-cni-plugins-rbwct\" (UID: \"14178151-9e5c-49bd-941f-5178607a5ad4\") " pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.086354 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vwh6\" (UniqueName: \"kubernetes.io/projected/c2f344f5-5570-4fb6-b59d-5b881cd1d2cc-kube-api-access-8vwh6\") pod \"multus-4lplf\" (UID: \"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\") " pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.086519 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.088724 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.089059 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.091718 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szpfm\" (UniqueName: \"kubernetes.io/projected/99289791-dfca-4a6e-81a1-792954f034af-kube-api-access-szpfm\") pod \"node-resolver-vmbd2\" (UID: \"99289791-dfca-4a6e-81a1-792954f034af\") " pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.092987 5055 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7" exitCode=255 Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.093851 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.094210 5055 scope.go:117] "RemoveContainer" containerID="c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.094421 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.098401 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.099326 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.103411 5055 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.103833 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.104128 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.105157 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.106214 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.107439 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.108111 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.109064 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.109572 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.110859 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.111311 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.111776 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Oct 11 06:53:57 crc 
kubenswrapper[5055]: I1011 06:53:57.112563 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.113121 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.114270 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.114966 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.115424 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7"} Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.115459 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5dg24"] Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.116248 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.116414 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.118053 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.118269 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.118646 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.118693 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.118647 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.119186 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.119932 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.128950 5055 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.138357 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.147739 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.155879 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165703 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165745 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165788 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvpvs\" (UniqueName: \"kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165811 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165832 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165856 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165888 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.165946 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.166049 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.166091 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.166905 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.166980 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167018 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167043 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167091 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167114 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167134 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167152 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167182 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.167214 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.170549 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.170828 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.170925 5055 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.184923 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.195885 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11
T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.204900 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.216260 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.221340 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.222686 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.230490 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.241305 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.249130 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.258025 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-rbwct" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272757 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272819 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272842 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272859 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272873 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvpvs\" (UniqueName: \"kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272888 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272903 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272918 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272943 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272957 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272972 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.272987 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273002 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273017 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273042 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273058 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273077 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273092 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: 
I1011 06:53:57.273108 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273120 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273174 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273210 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273231 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273251 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273616 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273656 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273686 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273710 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.273847 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274105 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274165 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274187 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274214 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274239 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274241 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274263 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274279 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") 
" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.274338 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.277978 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.288367 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.295383 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvpvs\" (UniqueName: \"kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs\") pod \"ovnkube-node-5dg24\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.300370 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.302550 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-vmbd2" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.310412 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.322407 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.327416 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-4lplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.333070 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.343472 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.355614 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.367344 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.375199 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.383217 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.393273 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: W1011 06:53:57.393941 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2f344f5_5570_4fb6_b59d_5b881cd1d2cc.slice/crio-c0f4170032c83723aa22d30c196e7a446d76955eebf6eb1cc41e3e6b54780181 WatchSource:0}: Error finding container c0f4170032c83723aa22d30c196e7a446d76955eebf6eb1cc41e3e6b54780181: Status 404 returned error can't find the container with id c0f4170032c83723aa22d30c196e7a446d76955eebf6eb1cc41e3e6b54780181 Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.403687 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.413236 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.422927 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.433533 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.438345 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.454978 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.476490 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.476684 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.476909 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:53:58.476873742 +0000 UTC m=+22.251147709 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.476977 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.477033 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.477143 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.477180 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.477215 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:58.477196321 +0000 UTC m=+22.251470328 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.477239 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:58.477227222 +0000 UTC m=+22.251501029 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.487253 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.500144 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.525695 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.542312 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.558031 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.577664 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 
06:53:57.577716 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.577955 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.577976 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578002 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578036 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:58.578023615 +0000 UTC m=+22.352297422 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578098 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578110 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578117 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.578152 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:53:58.578130738 +0000 UTC m=+22.352404545 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:57 crc kubenswrapper[5055]: I1011 06:53:57.992616 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:53:57 crc kubenswrapper[5055]: E1011 06:53:57.992732 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.099723 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" exitCode=0 Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.099819 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.099869 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"7fe38a71ba9b53123eaf6c08fa7d39cbb921d8e7613f0ca6c6e71496a83a4b7d"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.101237 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vmbd2" event={"ID":"99289791-dfca-4a6e-81a1-792954f034af","Type":"ContainerStarted","Data":"d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.101277 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vmbd2" event={"ID":"99289791-dfca-4a6e-81a1-792954f034af","Type":"ContainerStarted","Data":"03b2da5697279951e4deb5e3481587c9cf716a85f91a6503e582eccd07e8084e"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.103370 5055 generic.go:334] "Generic (PLEG): container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11" exitCode=0 Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.103466 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.103535 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerStarted","Data":"d769e6de7821f60028229aab92d8c9b8713757ac5beead315080fb7741f87074"} Oct 11 06:53:58 crc kubenswrapper[5055]: 
I1011 06:53:58.104838 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5a4cd9e64998f32a11301d5a3c9c72d51412e88342118b541ebd946b491c249d"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.107572 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.107608 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.107621 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"9e41e8cfdf5e4d31a5c232ae8864ead1cd44ef4564b29361aa332834bddcd375"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.109290 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.109319 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"eb89c11f3d1c18cb9ded6847ebf8320cc4e18b5b568a51a84c1fed98ffe33d1c"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.110405 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.114258 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.119903 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.120205 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.122043 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerStarted","Data":"4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.122090 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerStarted","Data":"c0f4170032c83723aa22d30c196e7a446d76955eebf6eb1cc41e3e6b54780181"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.124153 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.124228 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.124245 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"90614beb85e42731b31d71b11ee87389c83af09b7d9d1a52d5bf3982d1adeba5"} Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.132812 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.156084 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.172049 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.185930 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.199503 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.217648 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.230511 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.241342 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.250387 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.261223 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.272330 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.290462 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.306233 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.321387 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.339672 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.357525 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.376222 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.395711 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.414593 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.429433 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.443819 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc 
kubenswrapper[5055]: I1011 06:53:58.458820 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.472060 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.487254 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.487396 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.487433 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:54:00.487407498 +0000 UTC m=+24.261681315 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.487504 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.487557 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-10-11 06:54:00.487544422 +0000 UTC m=+24.261818229 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.487584 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.487703 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.487753 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:00.487742838 +0000 UTC m=+24.262016645 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.495183 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.509744 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:58Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.589037 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.589079 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 
06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589191 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589204 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589225 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589264 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:00.589251581 +0000 UTC m=+24.363525388 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589544 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589562 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589569 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.589591 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:00.589584021 +0000 UTC m=+24.363857828 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.992620 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.992673 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.992963 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:53:58 crc kubenswrapper[5055]: E1011 06:53:58.993055 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:53:58 crc kubenswrapper[5055]: I1011 06:53:58.998038 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.130382 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.130755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.130906 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.131027 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.131111 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.131189 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.132214 5055 generic.go:334] "Generic (PLEG): 
container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811" exitCode=0 Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.132366 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811"} Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.158185 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.176393 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.188225 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.199231 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.211713 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.223939 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.236969 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.249448 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.261951 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-
11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.272568 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\"
:\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.287685 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri
-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.308381 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.329919 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.677868 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-892mt"] Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.678234 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-892mt" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.680327 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.680404 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.680586 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.681346 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.692515 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.716977 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.763356 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.782289 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.794937 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.803156    5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-host\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.803209    5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwgpq\" (UniqueName: \"kubernetes.io/projected/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-kube-api-access-jwgpq\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.803232    5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-serviceca\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.806020    5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.817548 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.829286 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.841677 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/r
un/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.852284 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kube
rnetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.867998 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.879919 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.884121    5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.897204    5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.899587    5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.900947    5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z 
is after 2025-08-24T17:21:41Z"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.904057    5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-host\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.904109    5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwgpq\" (UniqueName: \"kubernetes.io/projected/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-kube-api-access-jwgpq\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.904131    5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-serviceca\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.904149    5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-host\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.905607    5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-serviceca\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt"
Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.910992    5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.921921 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.923395 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwgpq\" (UniqueName: \"kubernetes.io/projected/b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26-kube-api-access-jwgpq\") pod \"node-ca-892mt\" (UID: \"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\") " pod="openshift-image-registry/node-ca-892mt" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.932366 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.942443 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.951897 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.961821 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.972214 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.984825 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.989776 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-892mt" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.993155 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:53:59 crc kubenswrapper[5055]: E1011 06:53:59.993266 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:53:59 crc kubenswrapper[5055]: I1011 06:53:59.996853 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:53:59Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: W1011 06:54:00.002332 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb70778bc_7f9e_4ef8_b3d6_35ea5c9a9b26.slice/crio-1875d0395306003d00e97cbc62cd91048eb13d3ac4bfef5f26ea130cb33ddc6f WatchSource:0}: Error finding container 1875d0395306003d00e97cbc62cd91048eb13d3ac4bfef5f26ea130cb33ddc6f: Status 404 returned error can't find the container with id 
1875d0395306003d00e97cbc62cd91048eb13d3ac4bfef5f26ea130cb33ddc6f Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.010691 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.027399 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.047593 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.086077 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.130378 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.137074 5055 generic.go:334] "Generic (PLEG): container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e" exitCode=0 Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.137148 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e"} Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.139879 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901"} Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.143177 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-892mt" event={"ID":"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26","Type":"ContainerStarted","Data":"1875d0395306003d00e97cbc62cd91048eb13d3ac4bfef5f26ea130cb33ddc6f"} Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.175332 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb6
8e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.185198 5055 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.226104 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.267750 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.311803 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.349217 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.390946 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.424250 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.466865 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.509823 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.514092 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.514312 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.514424 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.514641 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.514823 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:54:04.514800339 +0000 UTC m=+28.289074146 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.515303 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:04.515264052 +0000 UTC m=+28.289537859 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.515304 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.515377 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:04.515366385 +0000 UTC m=+28.289640202 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.549613 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.584825 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"202
5-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.615751 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.615972 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.615918 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616072 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616083 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616129 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:04.616116407 +0000 UTC m=+28.390390214 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616263 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616325 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616376 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.616453 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:04.616444307 +0000 UTC m=+28.390718114 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.627774 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.665877 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.714050 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-li
b\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\
\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.748742 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\
\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.794154 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.832230 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:00Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.993908 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:00 crc kubenswrapper[5055]: I1011 06:54:00.993929 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.994040 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:00 crc kubenswrapper[5055]: E1011 06:54:00.994152 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.147514 5055 generic.go:334] "Generic (PLEG): container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557" exitCode=0 Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.147548 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557"} Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.149552 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-892mt" event={"ID":"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26","Type":"ContainerStarted","Data":"70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38"} Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.154597 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.167385 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.181275 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.193985 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.212396 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.223263 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.236602 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.250717 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.262554 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.273871 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.284858 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.297088 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.305755 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.346810 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.387702 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.426756 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.467358 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.506476 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.550946 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.596673 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.624392 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.667612 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.711154 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.747188 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.787456 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.824570 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.875729 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.908845 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.946933 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.987574 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-releas
e\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:01Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:01 crc kubenswrapper[5055]: I1011 06:54:01.992648 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:01 crc kubenswrapper[5055]: E1011 06:54:01.992745 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.029068 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.159548 5055 generic.go:334] "Generic (PLEG): container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1" exitCode=0 Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.159615 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.172639 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.185619 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.212467 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.232318 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.244217 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.266109 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.306473 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.345894 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.387034 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.424125 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.427783 5055 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.429645 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.429697 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: 
I1011 06:54:02.429713 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.429934 5055 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.488432 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.499705 5055 kubelet_node_status.go:115] "Node was previously registered" node="crc" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.499977 5055 kubelet_node_status.go:79] "Successfully registered node" node="crc" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.501024 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.501051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.501062 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.501077 5055 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.501088 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.515716 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.518902 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.518933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.518941 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.518957 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.518969 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.530974 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.537275 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.537374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.537413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.537432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.537448 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.548960 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.550714 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.553837 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.553868 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.553879 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.553892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.553900 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.563916 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.566737 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.566787 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.566798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.566812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.566821 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.577529 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.577688 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.579157 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.579189 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.579202 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.579218 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.579229 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.586177 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.627822 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.665568 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:02Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.681088 5055 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.681124 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.681136 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.681150 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.681159 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.783869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.783908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.783918 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.783933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.783945 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.886370 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.886663 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.886737 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.886835 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.886904 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.989636 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.989683 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.989696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.989712 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.989723 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:02Z","lastTransitionTime":"2025-10-11T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.993037 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.993339 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:02 crc kubenswrapper[5055]: I1011 06:54:02.993038 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:02 crc kubenswrapper[5055]: E1011 06:54:02.993697 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.092010 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.092045 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.092055 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.092073 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.092084 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.167047 5055 generic.go:334] "Generic (PLEG): container finished" podID="14178151-9e5c-49bd-941f-5178607a5ad4" containerID="c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05" exitCode=0 Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.167223 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerDied","Data":"c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.198995 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.212038 5055 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.212085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.212094 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.212110 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.212122 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.224053 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.245113 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.261759 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.288128 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.306045 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.316798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.316844 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.316856 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.316875 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.316886 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.319468 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.339151 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z 
is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.363666 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.373813 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.385178 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.394291 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.405619 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.417219 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.418972 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.419012 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.419024 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.419039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.419050 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.428797 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:03Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.520735 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.520783 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.520794 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.520812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.520822 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.623526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.623877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.623886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.623900 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.623931 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.726607 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.726640 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.726648 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.726662 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.726672 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.830299 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.830346 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.830358 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.830375 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.830391 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.933280 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.933341 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.933359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.933381 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.933397 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:03Z","lastTransitionTime":"2025-10-11T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:03 crc kubenswrapper[5055]: I1011 06:54:03.992967 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:03 crc kubenswrapper[5055]: E1011 06:54:03.993118 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.036707 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.036746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.036756 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.036797 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.036808 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.139499 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.139579 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.139600 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.139633 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.139663 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.177565 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.178631 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.185387 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" event={"ID":"14178151-9e5c-49bd-941f-5178607a5ad4","Type":"ContainerStarted","Data":"c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.203222 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.211281 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.223690 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:
57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.239196 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.242996 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.243052 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.243064 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.243080 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.243093 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.265451 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743db
d9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.286636 5055 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 
11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.301267 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state
\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.315222 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.336052 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1
d4b04981e1fd7f83ba6a4d62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.346198 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.346238 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.346254 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.346275 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.346289 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.367251 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.380573 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.399857 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.418264 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.431498 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.447947 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.449486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.449519 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.449531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.449551 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.449562 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.460565 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.477137 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.495742 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.512643 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.527512 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.542061 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.552816 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.552886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.552915 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.552934 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.552945 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.555549 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.559160 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.559253 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.559310 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.559291102 +0000 UTC m=+36.333564919 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.559316 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.559350 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.559341933 +0000 UTC m=+36.333615740 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.559367 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.559467 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.559515 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.559504808 +0000 UTC m=+36.333778625 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.574808 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki
/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\
\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn
\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.587680 5055 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.598973 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.618302 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.634527 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.647491 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.655413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.655452 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.655463 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.655477 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.655487 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.660019 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.660065 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660200 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660227 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660240 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660281 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.66026704 +0000 UTC m=+36.434540847 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660705 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660795 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660819 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.660912 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.660886578 +0000 UTC m=+36.435160395 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.662189 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.681294 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.694674 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:04Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.759996 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.760057 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.760088 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc 
kubenswrapper[5055]: I1011 06:54:04.760116 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.760137 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.862392 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.862469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.862489 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.862511 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.862528 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.964606 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.964660 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.964675 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.964695 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.964710 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:04Z","lastTransitionTime":"2025-10-11T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.993211 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.993353 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:04 crc kubenswrapper[5055]: I1011 06:54:04.993463 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:04 crc kubenswrapper[5055]: E1011 06:54:04.993697 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.066660 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.066732 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.066754 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.066816 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.066840 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.169474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.169518 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.169534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.169555 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.169572 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.189163 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.189810 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.273541 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.273617 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.273641 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.273671 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.273693 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.278951 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.310602 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1
d4b04981e1fd7f83ba6a4d62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.328376 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.348302 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.377828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.377905 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.377927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.377957 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.377979 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.385680 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.403197 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.423337 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.445244 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.465427 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.481871 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.481944 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.481969 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc 
kubenswrapper[5055]: I1011 06:54:05.481999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.482022 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.487002 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.507290 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.526168 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.537756 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.550847 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.568565 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.584132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.584176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.584188 5055 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.584208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.584221 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.587848 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.685860 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.685893 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.685902 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.685919 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.685928 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.758284 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.788473 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.788516 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.788527 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.788546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.788558 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.891447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.891487 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.891500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.891516 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.891530 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.992866 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:05 crc kubenswrapper[5055]: E1011 06:54:05.993097 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.994375 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.994417 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.994425 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.994441 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:05 crc kubenswrapper[5055]: I1011 06:54:05.994451 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:05Z","lastTransitionTime":"2025-10-11T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.096898 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.096970 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.096980 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.096994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.097004 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.194016 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/0.log" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.196813 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62" exitCode=1 Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.196859 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198003 5055 scope.go:117] "RemoveContainer" containerID="0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198728 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198756 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198793 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198813 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.198828 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.220852 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.236617 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.252682 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.265364 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.280099 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.297576 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.302073 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.302103 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.302112 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.302124 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.302132 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.309616 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.324274 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.338990 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.351422 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.367047 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.378304 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.395074 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.404608 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.404642 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.404650 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.404662 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.404670 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.406138 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.425382 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1
d4b04981e1fd7f83ba6a4d62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:06Z\\\",\\\"message\\\":\\\") from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137710 6348 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 06:54:06.137717 6348 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 06:54:06.137726 6348 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 06:54:06.137889 6348 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137924 6348 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 06:54:06.137934 6348 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 06:54:06.137980 6348 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138086 6348 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138351 6348 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138882 6348 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138916 6348 reflector.go:311] Stopping reflector *v1.Pod (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.507661 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.507739 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.507747 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.507763 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.507792 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.609813 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.609853 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.609861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.609874 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.609883 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.711875 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.711913 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.711922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.711937 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.711948 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.814418 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.814452 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.814469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.814488 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.814499 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.916687 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.916726 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.916734 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.916746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.916755 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:06Z","lastTransitionTime":"2025-10-11T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.992406 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:06 crc kubenswrapper[5055]: I1011 06:54:06.992458 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:06 crc kubenswrapper[5055]: E1011 06:54:06.992535 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:06 crc kubenswrapper[5055]: E1011 06:54:06.992609 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
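
Annotation: every status patch above is rejected for the same root cause; the serving certificate of the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, long before the node clock's 2025-10-11 reading. A minimal Go sketch of the validity check crypto/x509 applies during the handshake (the certificate path is hypothetical; per the volume mounts logged below, the webhook container mounts its cert under /etc/webhook-cert/):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the webhook's actual serving certificate.
	data, err := os.ReadFile("tls.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block in input")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// The verifier rejects any chain whose validity window excludes "now";
	// that is the "certificate has expired or is not yet valid" error above.
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("invalid: current time %s is not within [%s, %s]\n",
			now.Format(time.RFC3339),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339))
	}
}
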
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.009439 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"s
tartedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d717
96dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019277 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
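
Annotation: the quoted payload that follows is a strategic-merge patch the kubelet status manager computed between its local pod status and the API server's copy; the $setElementOrder/conditions directives seen in these payloads come from that patch format, which keys list elements on a merge key such as type or name. A minimal sketch of producing such a patch, assuming the k8s.io/api and k8s.io/apimachinery modules:

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Old and new views of the same pod status, differing in one condition.
	oldPod := corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionFalse},
	}}}
	newPod := corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionTrue},
	}}}

	oldJSON, _ := json.Marshal(oldPod)
	newJSON, _ := json.Marshal(newPod)

	// corev1.Pod carries the merge keys (conditions are keyed by "type"),
	// which is what yields "$setElementOrder/conditions" in payloads like
	// the ones logged here.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
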
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019476 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019549 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019576 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019606 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.019623 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.031232 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.043172 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.054264 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.067659 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.079326 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.089687 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.101360 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.114445 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.122035 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.122090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.122107 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.122129 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.122145 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.129156 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.139295 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.152133 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.164449 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.184305 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1
d4b04981e1fd7f83ba6a4d62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:06Z\\\",\\\"message\\\":\\\") from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137710 6348 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 06:54:06.137717 6348 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 06:54:06.137726 6348 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 06:54:06.137889 6348 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137924 6348 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 06:54:06.137934 6348 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 06:54:06.137980 6348 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138086 6348 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138351 6348 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138882 6348 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138916 6348 reflector.go:311] Stopping reflector *v1.Pod (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.202547 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/1.log" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.203506 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/0.log" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.207348 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a" exitCode=1 Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.207393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.207458 5055 scope.go:117] "RemoveContainer" containerID="0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.208230 5055 scope.go:117] "RemoveContainer" containerID="4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a" Oct 11 06:54:07 crc kubenswrapper[5055]: E1011 06:54:07.208412 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.224648 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.224677 5055 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.224686 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.224700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.224709 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.233175 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"v
olumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,
\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.243938 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.255603 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.265844 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.274541 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.283975 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.293894 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.305090 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.317126 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327636 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327837 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327852 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327870 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.327883 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.345174 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.356285 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.370513 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.382746 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.400306 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea
63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:06Z\\\",\\\"message\\\":\\\") from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137710 6348 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 06:54:06.137717 6348 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 06:54:06.137726 6348 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 06:54:06.137889 6348 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137924 6348 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 06:54:06.137934 6348 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 06:54:06.137980 6348 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138086 6348 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138351 6348 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138882 6348 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138916 6348 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.430235 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.430290 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.430306 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.430331 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.430348 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.533397 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.533501 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.533517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.533534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.533545 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.636139 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.636180 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.636192 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.636236 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.636250 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.739345 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.739395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.739419 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.739440 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.739455 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.842249 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.842589 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.842693 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.842824 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.842959 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.945973 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.946325 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.946470 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.946613 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.946748 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:07Z","lastTransitionTime":"2025-10-11T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:07 crc kubenswrapper[5055]: I1011 06:54:07.993457 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:07 crc kubenswrapper[5055]: E1011 06:54:07.993662 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.050178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.050240 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.050260 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.050286 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.050304 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.089758 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.104032 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.118819 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.132088 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.145872 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.152881 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.152917 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.152933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.152948 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.152957 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.156881 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.174341 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.187537 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.201051 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.213064 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/1.log" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.219013 5055 scope.go:117] "RemoveContainer" containerID="4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a" Oct 11 06:54:08 crc kubenswrapper[5055]: E1011 06:54:08.220513 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.222483 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.236654 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.250335 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac
66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.255194 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.255231 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.255242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.255257 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.255269 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.268062 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.288857 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea
63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e4743ed093b44a32e30308f2aae418ed129a1e1d4b04981e1fd7f83ba6a4d62\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:06Z\\\",\\\"message\\\":\\\") from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137710 6348 handler.go:208] Removed *v1.Node event handler 7\\\\nI1011 06:54:06.137717 6348 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1011 06:54:06.137726 6348 handler.go:208] Removed *v1.Node event handler 2\\\\nI1011 06:54:06.137889 6348 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.137924 6348 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1011 06:54:06.137934 6348 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1011 06:54:06.137980 6348 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138086 6348 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1011 06:54:06.138351 6348 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138882 6348 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1011 06:54:06.138916 6348 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.310493 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\
\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.321633 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.337717 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.350971 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.357465 5055 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.357501 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.357517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.357537 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.357551 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.366324 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.379432 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.393663 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.412522 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea
63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.427204 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.438844 5055 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7c
e83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.459328 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.459379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.459401 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.459424 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.459440 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.462584 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.473422 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.485957 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.497403 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.514071 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.527466 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.540170 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:08Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.562055 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.562092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.562103 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.562120 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.562132 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.664909 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.664987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.665011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.665041 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.665064 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.767841 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.767881 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.767892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.767908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.767919 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.870799 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.870843 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.870853 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.870869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.870888 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.973503 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.973533 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.973541 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.973554 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.973563 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:08Z","lastTransitionTime":"2025-10-11T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.992824 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:54:08 crc kubenswrapper[5055]: E1011 06:54:08.993074 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:54:08 crc kubenswrapper[5055]: I1011 06:54:08.993445 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:54:08 crc kubenswrapper[5055]: E1011 06:54:08.993542 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.076427 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.076461 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.076468 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.076482 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.076490 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.182424 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.182498 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.182520 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.182550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.182570 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.285350 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.285415 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.285439 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.285471 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.285497 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.331492 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh"]
Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.332011 5055 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.334055 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.334358 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.346875 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.366334 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.379294 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.387269 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.387312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.387320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.387335 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.387344 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.396355 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.410446 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.410504 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.410536 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkqnf\" (UniqueName: \"kubernetes.io/projected/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-kube-api-access-kkqnf\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.410629 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-env-overrides\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.411187 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.428907 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.440882 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.454909 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.470905 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.486291 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.489711 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.489850 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc 
kubenswrapper[5055]: I1011 06:54:09.490046 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.490086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.490105 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.496987 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\
\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.507536 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511279 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511354 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511406 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkqnf\" (UniqueName: \"kubernetes.io/projected/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-kube-api-access-kkqnf\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511520 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-env-overrides\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511969 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.511992 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-env-overrides\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 
06:54:09.516481 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.523832 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\
\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.525012 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkqnf\" (UniqueName: \"kubernetes.io/projected/5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437-kube-api-access-kkqnf\") pod \"ovnkube-control-plane-749d76644c-h2gdh\" (UID: \"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.546215 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.557405 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.568675 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:09Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.592423 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.592577 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.592661 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.592740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.592854 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.645096 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.696433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.696474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.696491 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.696512 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.696528 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.798940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.798975 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.798994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.799011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.799024 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.901243 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.901276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.901285 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.901324 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.901335 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:09Z","lastTransitionTime":"2025-10-11T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:09 crc kubenswrapper[5055]: I1011 06:54:09.992518 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:09 crc kubenswrapper[5055]: E1011 06:54:09.992639 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.004112 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.004141 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.004152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.004166 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.004176 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.107136 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.107192 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.107204 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.107222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.107240 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.210413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.210545 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.210566 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.210591 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.210639 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.224624 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" event={"ID":"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437","Type":"ContainerStarted","Data":"4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.224727 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" event={"ID":"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437","Type":"ContainerStarted","Data":"1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.224805 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" event={"ID":"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437","Type":"ContainerStarted","Data":"adaa9f898a37c7c562a90342264d21c49857b614834e4ee0a360949dd24b7f2d"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.240710 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.259296 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.272369 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.288948 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.301526 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.313499 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.313556 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.313574 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc 
kubenswrapper[5055]: I1011 06:54:10.313597 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.313616 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.316953 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.331923 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.347584 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.365533 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.377365 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.392884 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\
\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.406503 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.416656 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.416709 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.416723 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.416741 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.416753 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.436032 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.448471 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.477910 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.489660 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.520170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.520278 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.520306 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.520337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.520361 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.624325 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.624393 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.624411 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.624437 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.624457 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.727492 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.727531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.727542 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.727559 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.727574 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.796640 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-glhzm"] Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.797347 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:10 crc kubenswrapper[5055]: E1011 06:54:10.797435 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.813713 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.830762 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.830841 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.830855 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.830880 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.830899 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.836464 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.857757 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea
63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.870615 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.881116 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.905024 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.914988 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.924885 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkbc9\" (UniqueName: \"kubernetes.io/projected/c7861c5b-622e-4cce-9360-be9885299bd4-kube-api-access-hkbc9\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.924961 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.927921 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.933405 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.933458 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.933476 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.933500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.933516 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:10Z","lastTransitionTime":"2025-10-11T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.941341 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.954057 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.971316 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.984371 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:10Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.993400 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:10 crc kubenswrapper[5055]: E1011 06:54:10.993522 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:10 crc kubenswrapper[5055]: I1011 06:54:10.993588 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:10 crc kubenswrapper[5055]: E1011 06:54:10.993857 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.002178 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:11Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.015909 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:11Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.026435 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.026531 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkbc9\" (UniqueName: \"kubernetes.io/projected/c7861c5b-622e-4cce-9360-be9885299bd4-kube-api-access-hkbc9\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.026679 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.026760 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:11.526737381 +0000 UTC m=+35.301011208 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.030099 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:11Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.036463 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.036517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.036537 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.036560 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:11 crc kubenswrapper[5055]: 
I1011 06:54:11.036577 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.054509 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\
\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"rest
artCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:11Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.054621 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkbc9\" (UniqueName: \"kubernetes.io/projected/c7861c5b-622e-4cce-9360-be9885299bd4-kube-api-access-hkbc9\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.067208 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:11Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.140292 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.140381 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.140409 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.140444 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.140473 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.243807 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.243867 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.243886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.243911 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.243930 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.346443 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.346507 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.346524 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.346549 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.346569 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.449068 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.449132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.449151 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.449176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.449194 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.532437 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.532624 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.532719 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:12.532696503 +0000 UTC m=+36.306970350 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.551876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.551936 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.551956 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.551982 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.552000 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.655799 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.656443 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.656569 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.656667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.656806 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.759550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.759597 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.759609 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.759627 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.759641 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.862919 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.862961 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.862971 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.862985 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.862996 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.965891 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.965931 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.965942 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.965957 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.965965 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:11Z","lastTransitionTime":"2025-10-11T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.993332 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:11 crc kubenswrapper[5055]: I1011 06:54:11.993359 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.993546 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:11 crc kubenswrapper[5055]: E1011 06:54:11.993682 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.068542 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.068817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.068899 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.068992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.069079 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.170548 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.170814 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.170935 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.171145 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.171290 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.274164 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.274247 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.274274 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.274310 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.274335 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.377093 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.377159 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.377183 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.377208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.377225 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.480150 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.480203 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.480219 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.480242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.480259 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.544728 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.544997 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.545130 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:14.545092653 +0000 UTC m=+38.319366520 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.584384 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.584445 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.584462 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.584481 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.584494 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.593100 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.593157 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.593174 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.593199 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.593216 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.615261 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:12Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.621134 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.621219 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.621262 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.621296 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.621319 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.642643 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:12Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.645869 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.646116 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.646168 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:54:28.646138973 +0000 UTC m=+52.420412820 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.646250 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.646284 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.646360 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:28.646334659 +0000 UTC m=+52.420608506 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.646374 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.646428 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:28.646413731 +0000 UTC m=+52.420687568 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.649632 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.649705 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.649734 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.649797 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.649825 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.669746 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:12Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.674637 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.674670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.674680 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.674696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.674708 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.692750 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:12Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.697092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.697137 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.697152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.697172 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.697185 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.717084 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:12Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.717287 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.719348 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.719396 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.719412 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.719433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.719448 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.747145 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.747274 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747278 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747328 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747347 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747360 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747407 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:28.747388999 +0000 UTC m=+52.521662816 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747328 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747439 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.747484 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:28.747469192 +0000 UTC m=+52.521743009 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.821395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.821433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.821444 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.821461 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.821473 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.925486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.925564 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.925601 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.925630 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.925649 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:12Z","lastTransitionTime":"2025-10-11T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.993450 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:12 crc kubenswrapper[5055]: I1011 06:54:12.993528 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.993650 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:12 crc kubenswrapper[5055]: E1011 06:54:12.993883 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.028500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.028554 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.028571 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.028594 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.028612 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.131644 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.131955 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.132023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.132092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.132172 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.235275 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.235351 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.235375 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.235403 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.235425 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.339178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.339241 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.339259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.339315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.339337 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.443323 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.443447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.443470 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.443496 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.443522 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.546276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.546353 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.546376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.546408 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.546432 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.649873 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.649911 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.649922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.649940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.649950 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.753042 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.753102 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.753117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.753138 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.753156 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.863593 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.863883 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.863903 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.863943 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.863956 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.966575 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.966609 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.966620 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.966638 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.966652 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:13Z","lastTransitionTime":"2025-10-11T06:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.992462 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:13 crc kubenswrapper[5055]: I1011 06:54:13.992496 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:13 crc kubenswrapper[5055]: E1011 06:54:13.992567 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:13 crc kubenswrapper[5055]: E1011 06:54:13.992668 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.069042 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.069087 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.069099 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.069117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.069131 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.171557 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.171611 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.171626 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.171644 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.171656 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.274798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.274888 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.274914 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.274948 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.274975 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.377922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.378096 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.378119 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.378140 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.378155 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.482599 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.482711 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.482736 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.482798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.482825 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.571073 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:14 crc kubenswrapper[5055]: E1011 06:54:14.571409 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:14 crc kubenswrapper[5055]: E1011 06:54:14.571621 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:18.57158994 +0000 UTC m=+42.345863787 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.586845 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.586943 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.586968 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.587036 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.587062 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.691441 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.691549 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.691594 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.691629 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.691677 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.795315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.795374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.795400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.795430 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.795452 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.897744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.897813 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.897825 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.897837 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.897848 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.993385 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:14 crc kubenswrapper[5055]: E1011 06:54:14.993743 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.993391 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:14 crc kubenswrapper[5055]: E1011 06:54:14.994079 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.999247 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.999418 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.999503 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.999584 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:14 crc kubenswrapper[5055]: I1011 06:54:14.999676 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:14Z","lastTransitionTime":"2025-10-11T06:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.102555 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.102596 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.102612 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.102633 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.102647 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.205178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.205214 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.205228 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.205249 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.205263 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.307521 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.307573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.307583 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.307595 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.307604 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.410011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.410070 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.410078 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.410091 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.410100 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.511895 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.511932 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.511940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.511953 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.511962 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.614935 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.614991 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.615001 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.615014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.615023 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.718037 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.718098 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.718107 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.718122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.718132 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.821363 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.821401 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.821412 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.821432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.821469 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.924907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.924966 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.924982 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.925006 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.925023 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:15Z","lastTransitionTime":"2025-10-11T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.993221 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:15 crc kubenswrapper[5055]: I1011 06:54:15.993234 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:15 crc kubenswrapper[5055]: E1011 06:54:15.993449 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:15 crc kubenswrapper[5055]: E1011 06:54:15.993483 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.027877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.027958 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.027991 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.028012 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.028028 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.131437 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.131504 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.131542 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.131574 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.131597 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.233561 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.233615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.233629 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.233645 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.233656 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.335697 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.335787 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.335798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.335813 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.335828 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.442216 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.442520 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.442654 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.442814 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.442950 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.545208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.545254 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.545266 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.545283 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.545294 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.647933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.647979 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.647992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.648007 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.648020 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.750535 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.750810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.750892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.750989 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.751085 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.854112 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.854546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.854697 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.854876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.855023 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.958347 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.958391 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.958400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.958414 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.958426 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:16Z","lastTransitionTime":"2025-10-11T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.992727 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:16 crc kubenswrapper[5055]: I1011 06:54:16.992797 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:16 crc kubenswrapper[5055]: E1011 06:54:16.992908 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:16 crc kubenswrapper[5055]: E1011 06:54:16.993009 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.005676 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.017383 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.033048 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.054968 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.060337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.060517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.060667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.060800 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.061040 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:17Z","lastTransitionTime":"2025-10-11T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.070830 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.085670 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.096997 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.136731 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea
63097ba0408f0234fedea52a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.163959 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.164001 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.164013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.164028 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.164040 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:17Z","lastTransitionTime":"2025-10-11T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.165448 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.179554 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc 
kubenswrapper[5055]: I1011 06:54:17.202173 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.215960 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.229129 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.242476 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.252743 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.266874 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.266906 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.266914 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.266927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.266937 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:17Z","lastTransitionTime":"2025-10-11T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.267071 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.280371 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:17Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.369362 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.369409 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.369453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.369469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.369482 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:17Z","lastTransitionTime":"2025-10-11T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.992735 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:17 crc kubenswrapper[5055]: I1011 06:54:17.992791 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:17 crc kubenswrapper[5055]: E1011 06:54:17.993153 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:17 crc kubenswrapper[5055]: E1011 06:54:17.993433 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.088729 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.088832 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.088852 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.088879 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.088900 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:18Z","lastTransitionTime":"2025-10-11T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.609460 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:18 crc kubenswrapper[5055]: E1011 06:54:18.609679 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:18 crc kubenswrapper[5055]: E1011 06:54:18.609888 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:26.609849593 +0000 UTC m=+50.384123440 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.993216 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:54:18 crc kubenswrapper[5055]: E1011 06:54:18.993419 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:54:18 crc kubenswrapper[5055]: I1011 06:54:18.993227 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:54:18 crc kubenswrapper[5055]: E1011 06:54:18.993751 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.020004 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.020054 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.020070 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.020090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.020147 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:19Z","lastTransitionTime":"2025-10-11T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.993252 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:19 crc kubenswrapper[5055]: I1011 06:54:19.993253 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:19 crc kubenswrapper[5055]: E1011 06:54:19.993386 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:19 crc kubenswrapper[5055]: E1011 06:54:19.993504 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.059289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.059332 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.059350 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.059369 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.059383 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:20Z","lastTransitionTime":"2025-10-11T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.993132 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:54:20 crc kubenswrapper[5055]: E1011 06:54:20.993282 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:54:20 crc kubenswrapper[5055]: I1011 06:54:20.993368 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:54:20 crc kubenswrapper[5055]: E1011 06:54:20.993561 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.088065 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.088281 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.088343 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.088447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.088517 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.293694 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.293757 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.293806 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.293833 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.504641 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.504737 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.504792 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.504827 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.504860 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.608054 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.608096 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.608106 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.608121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.608130 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.711320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.711380 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.711398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.711422 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.711440 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.814599 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.814640 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.814652 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.814670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.814683 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.917441 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.917478 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.917487 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.917502 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.917513 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:21Z","lastTransitionTime":"2025-10-11T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.993432 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:21 crc kubenswrapper[5055]: I1011 06:54:21.993475 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:21 crc kubenswrapper[5055]: E1011 06:54:21.993611 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:21 crc kubenswrapper[5055]: E1011 06:54:21.993745 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.020335 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.020376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.020385 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.020399 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.020409 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.123075 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.123115 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.123128 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.123144 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.123157 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.225438 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.225473 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.225502 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.225517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.225525 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.328203 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.328252 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.328263 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.328279 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.328288 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.430805 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.430847 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.430859 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.430875 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.430888 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.533378 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.533410 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.533418 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.533431 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.533440 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.636963 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.637014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.637030 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.637049 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.637062 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.739462 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.739524 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.739549 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.739573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.739591 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.842574 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.842626 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.842635 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.842652 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.842664 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.921019 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.921059 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.921076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.921093 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.921106 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.932976 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:22Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.937046 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.937077 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.937086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.937100 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.937108 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.952691 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:22Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.956706 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.957013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.957046 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.957095 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.957122 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.974903 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:22Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.978946 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.978980 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.978991 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.979009 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.979021 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.990683 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:22Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.993105 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.993172 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.993276 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:22 crc kubenswrapper[5055]: E1011 06:54:22.993367 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994501 5055 scope.go:117] "RemoveContainer" containerID="4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994544 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994581 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994611 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994641 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:22 crc kubenswrapper[5055]: I1011 06:54:22.994658 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:22Z","lastTransitionTime":"2025-10-11T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: E1011 06:54:23.015075 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: E1011 06:54:23.015476 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.020016 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.020069 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.020088 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.020109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.020123 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.122231 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.122289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.122306 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.122329 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.122342 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.225406 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.225443 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.225454 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.225469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.225479 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.269154 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/1.log" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.272586 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.273081 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.287419 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.301836 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.314260 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.327398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.327428 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.327439 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.327453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.327465 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.332493 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 
6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.343464 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.364417 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.373457 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.381429 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.392376 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.406028 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.416500 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.428583 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.429602 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.429636 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.429651 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.429669 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.429681 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.444114 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.461010 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.479978 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.500468 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.518511 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:23Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.532222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.532256 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.532265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.532282 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.532292 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.634747 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.634794 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.634803 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.634817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.634826 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.737035 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.737081 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.737095 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.737112 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.737124 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.839705 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.839744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.839753 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.839787 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.839797 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.942318 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.942352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.942365 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.942379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.942391 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:23Z","lastTransitionTime":"2025-10-11T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.993267 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:23 crc kubenswrapper[5055]: I1011 06:54:23.993306 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:23 crc kubenswrapper[5055]: E1011 06:54:23.993439 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:23 crc kubenswrapper[5055]: E1011 06:54:23.993520 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.045675 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.045718 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.045738 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.045792 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.045808 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.148579 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.148634 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.148643 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.148656 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.148664 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.250937 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.251148 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.251207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.251265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.251354 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.278105 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/2.log" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.278820 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/1.log" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.283119 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a" exitCode=1 Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.283187 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.283247 5055 scope.go:117] "RemoveContainer" containerID="4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.284251 5055 scope.go:117] "RemoveContainer" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a" Oct 11 06:54:24 crc kubenswrapper[5055]: E1011 06:54:24.284543 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.303371 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.315992 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.325951 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.339194 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.350038 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.354089 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.354343 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.354545 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.354748 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.354996 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.362797 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.378103 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.403107 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb8
5966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4c302aeb395a094a62f88319ca5eaadd14f8daea63097ba0408f0234fedea52a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:07Z\\\",\\\"message\\\":\\\"06:54:06.975505 6485 services_controller.go:445] Built service openshift-network-diagnostics/network-check-target LB template configs for network=default: []services.lbConfig(nil)\\\\nI1011 06:54:06.975512 6485 ovn.go:134] Ensuring zone local for Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf in node crc\\\\nI1011 06:54:06.975519 6485 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-operator/network-operator-58b4c7f79c-55gtf after 0 failed attempt(s)\\\\nF1011 06:54:06.975523 6485 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:06Z is after 2025-08-24T17:21:41Z]\\\\nI1011 06:54:06.975528 6\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: 
Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{
\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.418214 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.431456 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.459470 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.459535 5055 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.459558 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.459590 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.459617 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.474357 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"r
eason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.491417 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.506192 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.518216 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.529595 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.541255 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.550995 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:24Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.561743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.561840 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.561855 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc 
kubenswrapper[5055]: I1011 06:54:24.561871 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.561883 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.664932 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.664979 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.664995 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.665014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.665026 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.767468 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.767552 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.767600 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.767622 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.767635 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.871192 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.871239 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.871251 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.871268 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.871280 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.974254 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.974294 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.974304 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.974320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.974331 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:24Z","lastTransitionTime":"2025-10-11T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.992683 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:24 crc kubenswrapper[5055]: I1011 06:54:24.992697 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:24 crc kubenswrapper[5055]: E1011 06:54:24.992977 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:24 crc kubenswrapper[5055]: E1011 06:54:24.993111 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.076892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.076928 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.076938 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.076954 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.076964 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.184822 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.184878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.184889 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.184907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.184918 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.287419 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.287485 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.287504 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.287530 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.287552 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.289399 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/2.log" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.293805 5055 scope.go:117] "RemoveContainer" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a" Oct 11 06:54:25 crc kubenswrapper[5055]: E1011 06:54:25.294009 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.328077 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.341046 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.353691 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.368702 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.382576 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.389972 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.390026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.390035 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.390050 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.390060 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.395691 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.413180 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.425972 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.437567 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.448983 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.466051 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.478126 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.491060 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.492425 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.492457 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 
06:54:25.492468 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.492484 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.492496 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.506096 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.517311 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.536841 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb8
5966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.551803 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.595112 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.595167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.595185 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.595213 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.595232 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.661139 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.674380 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.680625 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.696841 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.699268 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.699338 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.699365 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.699396 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.699421 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.719398 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.736830 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.750988 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.767019 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\
\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.784704 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.802022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.802101 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.802142 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.802167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.802182 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.807158 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.819884 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.832988 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.851988 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.867183 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.882884 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.900854 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.904475 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.904495 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.904505 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.904518 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.904528 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:25Z","lastTransitionTime":"2025-10-11T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.914282 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.931882 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.944286 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:25Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.992709 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:25 crc kubenswrapper[5055]: I1011 06:54:25.992818 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:25 crc kubenswrapper[5055]: E1011 06:54:25.992951 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:25 crc kubenswrapper[5055]: E1011 06:54:25.993109 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.008076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.008121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.008134 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.008154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.008168 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.110272 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.110318 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.110329 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.110347 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.110359 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.213559 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.213595 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.213603 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.213617 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.213626 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.315821 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.315877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.315892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.315911 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.315932 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.418047 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.418130 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.418153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.418179 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.418195 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.521368 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.521417 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.521432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.521454 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.521469 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.611956 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:26 crc kubenswrapper[5055]: E1011 06:54:26.612386 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:26 crc kubenswrapper[5055]: E1011 06:54:26.612732 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:54:42.612706059 +0000 UTC m=+66.386979946 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.624334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.624367 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.624376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.624389 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.624399 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.726781 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.726817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.726828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.726846 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.726860 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.828364 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.828396 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.828404 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.828417 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.828426 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.930931 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.930979 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.930992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.931007 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.931018 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:26Z","lastTransitionTime":"2025-10-11T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.992533 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:26 crc kubenswrapper[5055]: I1011 06:54:26.992602 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:26 crc kubenswrapper[5055]: E1011 06:54:26.992682 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:26 crc kubenswrapper[5055]: E1011 06:54:26.992857 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.003264 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.013947 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.024169 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.033890 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.033927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.033938 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.033953 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.033964 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.034731 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.045152 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.055077 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.066336 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.076466 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.098113 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.108956 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.118207 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.135333 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.136379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.136409 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.136421 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.136437 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.136446 5055 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.146580 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.156645 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.176349 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb8
5966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.187572 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.210204 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.223796 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:27Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.238334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.238395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.238408 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.238425 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.238436 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.341359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.341434 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.341454 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.341474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.341490 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.443963 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.444002 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.444013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.444030 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.444041 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.546414 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.546451 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.546460 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.546491 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.546501 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.649183 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.649220 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.649228 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.649242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.649251 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.751324 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.751357 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.751365 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.751376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.751387 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.852948 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.852987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.852998 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.853034 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.853044 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.955739 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.955798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.955810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.955827 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.955838 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:27Z","lastTransitionTime":"2025-10-11T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.992938 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:27 crc kubenswrapper[5055]: E1011 06:54:27.993052 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:27 crc kubenswrapper[5055]: I1011 06:54:27.993132 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:27 crc kubenswrapper[5055]: E1011 06:54:27.993202 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.057494 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.057545 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.057565 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.057592 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.057610 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.160512 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.160561 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.160571 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.160588 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.160599 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.263296 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.263331 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.263342 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.263357 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.263367 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.365226 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.365267 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.365278 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.365294 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.365306 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.467661 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.467692 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.467700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.467713 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.467721 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.570096 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.570147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.570159 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.570174 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.570183 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.672201 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.672258 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.672270 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.672283 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.672291 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.731845 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.731970 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.732036 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:00.732010872 +0000 UTC m=+84.506284679 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.732075 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.732144 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.732148 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:55:00.732131575 +0000 UTC m=+84.506405402 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.732229 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.732263 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:55:00.732256389 +0000 UTC m=+84.506530196 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.774222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.774259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.774273 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.774289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.774302 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.833372 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.833437 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833559 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833596 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833607 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833658 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:55:00.833643099 +0000 UTC m=+84.607916906 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833563 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833697 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833709 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.833745 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:55:00.833734632 +0000 UTC m=+84.608008519 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.876932 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.876968 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.876998 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.877013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.877023 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.979576 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.979621 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.979633 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.979652 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.979664 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:28Z","lastTransitionTime":"2025-10-11T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.992890 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.993038 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:28 crc kubenswrapper[5055]: I1011 06:54:28.993146 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:28 crc kubenswrapper[5055]: E1011 06:54:28.993293 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.081779 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.081814 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.081824 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.081839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.081847 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.184000 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.184038 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.184049 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.184061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.184070 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.286136 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.286176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.286186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.286201 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.286213 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.388119 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.388147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.388157 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.388169 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.388177 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.490950 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.490994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.491003 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.491017 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.491026 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.592682 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.592720 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.592729 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.592741 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.592750 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.694832 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.694867 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.694876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.694890 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.694899 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.797246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.797285 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.797294 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.797311 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.797323 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.900195 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.900303 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.900317 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.900337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.900351 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:29Z","lastTransitionTime":"2025-10-11T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.993184 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:29 crc kubenswrapper[5055]: I1011 06:54:29.993184 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:29 crc kubenswrapper[5055]: E1011 06:54:29.993313 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:29 crc kubenswrapper[5055]: E1011 06:54:29.993399 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.005550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.005590 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.005600 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.005615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.005627 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.108391 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.108453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.108462 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.108479 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.108487 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.211385 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.211469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.211493 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.211525 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.211549 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.313736 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.313762 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.313790 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.313802 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.313812 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.416880 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.416925 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.416934 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.416949 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.416974 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.520684 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.520725 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.520735 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.520751 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.520777 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.623335 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.623373 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.623384 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.623398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.623409 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.725715 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.725748 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.725756 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.725788 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.725797 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.828311 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.828384 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.828395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.828410 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.828420 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.931536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.931598 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.931619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.931645 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.931662 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:30Z","lastTransitionTime":"2025-10-11T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.992685 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:30 crc kubenswrapper[5055]: I1011 06:54:30.992733 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:30 crc kubenswrapper[5055]: E1011 06:54:30.992898 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:30 crc kubenswrapper[5055]: E1011 06:54:30.993073 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.035259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.035400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.035423 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.035454 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.035473 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.137696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.137732 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.137742 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.137757 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.137796 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.240359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.240403 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.240413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.240426 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.240434 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.343744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.343809 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.343822 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.343839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.343852 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.447287 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.447338 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.447351 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.447369 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.447381 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.549648 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.549685 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.549695 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.549709 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.549718 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.651979 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.652034 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.652050 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.652074 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.652090 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.755015 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.755065 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.755080 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.755100 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.755109 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.858825 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.858948 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.858967 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.858992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.859009 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.962172 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.962227 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.962245 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.962270 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.962287 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:31Z","lastTransitionTime":"2025-10-11T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.993006 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:31 crc kubenswrapper[5055]: I1011 06:54:31.993043 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:31 crc kubenswrapper[5055]: E1011 06:54:31.993128 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:31 crc kubenswrapper[5055]: E1011 06:54:31.993240 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.064086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.064127 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.064136 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.064152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.064162 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.165884 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.165912 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.165920 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.165933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.165943 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.268119 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.268178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.268196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.268221 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.268236 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.371074 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.371144 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.371164 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.371188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.371206 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.474117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.474201 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.474228 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.474259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.474287 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.576942 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.576976 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.576985 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.576999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.577008 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.679684 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.679749 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.679805 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.679838 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.679865 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.783114 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.783223 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.783244 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.783273 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.783297 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.886210 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.886334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.886354 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.886384 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.886402 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.989808 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.989886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.989908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.989939 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.989998 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:32Z","lastTransitionTime":"2025-10-11T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.993760 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:32 crc kubenswrapper[5055]: I1011 06:54:32.993885 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:32 crc kubenswrapper[5055]: E1011 06:54:32.994007 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:32 crc kubenswrapper[5055]: E1011 06:54:32.994267 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.093688 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.093796 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.093817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.093896 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.093920 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.197095 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.197326 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.197337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.197353 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.197363 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.299882 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.299916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.299925 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.299939 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.299948 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.334580 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.334611 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.334622 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.334638 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.334649 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.349285 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:33Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.354132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.354176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.354188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.354204 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.354214 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.372782 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:33Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.378217 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.378290 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.378312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.378344 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.378365 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.401664 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:33Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.408063 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.408133 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.408155 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.408182 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.408203 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.424230 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:33Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.429850 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.429918 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.429951 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.429983 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.430005 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.452136 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:33Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.452386 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.454796 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.454830 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.454843 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.454866 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.454880 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.558586 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.558650 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.558665 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.558689 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.558703 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.661689 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.661751 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.661822 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.661862 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.661888 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.764679 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.764709 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.764717 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.764731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.764740 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.868379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.868439 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.868456 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.868480 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.868496 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.971416 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.971486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.971507 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.971538 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.971559 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:33Z","lastTransitionTime":"2025-10-11T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.992359 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:33 crc kubenswrapper[5055]: I1011 06:54:33.992449 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.992491 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:33 crc kubenswrapper[5055]: E1011 06:54:33.992636 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.075020 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.075076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.075087 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.075109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.075123 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.179398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.179473 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.179495 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.179515 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.179528 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.283096 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.283158 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.283170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.283188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.283200 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.385945 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.385991 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.386000 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.386016 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.386027 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.489473 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.489541 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.489564 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.489593 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.489617 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.593095 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.593170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.593185 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.593215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.593232 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.698982 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.699113 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.699142 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.699212 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.699238 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.803028 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.803121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.803147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.803184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.803207 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.907086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.907145 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.907161 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.907186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.907203 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:34Z","lastTransitionTime":"2025-10-11T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.993363 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:34 crc kubenswrapper[5055]: I1011 06:54:34.993556 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:34 crc kubenswrapper[5055]: E1011 06:54:34.993635 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:34 crc kubenswrapper[5055]: E1011 06:54:34.993884 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.011510 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.011601 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.011622 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.011652 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.011677 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.114952 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.115018 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.115037 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.115061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.115079 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.217536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.217611 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.217634 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.217661 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.217681 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.321820 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.321937 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.321960 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.321988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.322009 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.425505 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.425587 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.425605 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.425634 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.425652 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.529156 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.529234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.529259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.529289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.529314 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.634105 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.634178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.634206 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.634407 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.634434 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.739084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.739166 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.739186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.739215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.739237 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.842433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.842505 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.842526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.842556 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.842579 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.946222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.946307 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.946337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.946376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.946402 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:35Z","lastTransitionTime":"2025-10-11T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.993004 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:35 crc kubenswrapper[5055]: I1011 06:54:35.993136 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:35 crc kubenswrapper[5055]: E1011 06:54:35.993212 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:35 crc kubenswrapper[5055]: E1011 06:54:35.993374 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.049434 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.049495 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.049510 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.049532 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.049545 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.152138 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.152207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.152226 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.152254 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.152271 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.255720 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.255848 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.255877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.255910 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.255941 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.359349 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.359406 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.359419 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.359440 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.359458 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.462010 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.462103 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.462123 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.462154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.462174 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.566538 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.566577 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.566586 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.566602 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.566612 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.669024 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.669131 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.669153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.669183 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.669210 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.771685 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.771837 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.771874 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.771910 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.771952 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.874529 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.874580 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.874595 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.874619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.874634 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.978089 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.978152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.978169 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.978197 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.978213 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:36Z","lastTransitionTime":"2025-10-11T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.994074 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:36 crc kubenswrapper[5055]: E1011 06:54:36.994373 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.994421 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:36 crc kubenswrapper[5055]: E1011 06:54:36.994548 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:36 crc kubenswrapper[5055]: I1011 06:54:36.995319 5055 scope.go:117] "RemoveContainer" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a" Oct 11 06:54:36 crc kubenswrapper[5055]: E1011 06:54:36.995630 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.008789 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7
814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 
06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.021696 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.034929 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.052808 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.064034 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.073683 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.079792 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.079933 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.080032 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.080152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.080241 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.126181 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.142659 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.158010 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.170545 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.180797 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.182872 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.182999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.183075 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.183171 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.183248 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.191467 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.200842 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.212886 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.225377 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.240996 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.257042 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.270874 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:37Z is after 2025-08-24T17:21:41Z"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.286683 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.286736 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.286747 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.286777 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.286789 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.389977 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.390042 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.390061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.390089 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.390108 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.492492 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.492561 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.492576 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.492601 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.492633 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.595533 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.595578 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.595608 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.595632 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.595645 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.702336 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.702387 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.702398 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.702416 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.702430 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.806550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.806602 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.806619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.806639 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.806652 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.909692 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.909739 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.909750 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.909780 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.909790 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:37Z","lastTransitionTime":"2025-10-11T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.993017 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:37 crc kubenswrapper[5055]: I1011 06:54:37.993029 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:37 crc kubenswrapper[5055]: E1011 06:54:37.993199 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:37 crc kubenswrapper[5055]: E1011 06:54:37.993370 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.012919 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.013001 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.013022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.013048 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.013069 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.115506 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.115542 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.115552 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.115565 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.115575 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.218730 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.218828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.218842 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.218861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.218874 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.323361 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.323433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.323451 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.323518 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.323538 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.426312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.426355 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.426366 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.426416 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.426434 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.529091 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.529130 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.529141 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.529158 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.529175 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.632122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.632190 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.632208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.632234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.632251 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.735167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.735217 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.735230 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.735269 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.735284 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.838544 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.838623 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.838644 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.838664 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.838704 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.941376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.941413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.941421 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.941445 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.941456 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:38Z","lastTransitionTime":"2025-10-11T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.995264 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:38 crc kubenswrapper[5055]: I1011 06:54:38.995269 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:38 crc kubenswrapper[5055]: E1011 06:54:38.995429 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:38 crc kubenswrapper[5055]: E1011 06:54:38.996671 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.044017 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.044073 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.044084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.044105 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.044118 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.146354 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.146420 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.146438 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.146456 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.146469 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.251092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.251153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.251163 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.251184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.251196 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.354233 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.354308 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.354329 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.354367 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.354391 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.457637 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.457671 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.457681 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.457697 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.457707 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.559718 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.559785 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.559796 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.559810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.559819 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.661995 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.662039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.662050 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.662065 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.662079 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.764601 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.764641 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.764660 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.764677 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.764690 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.867100 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.867166 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.867180 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.867196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.867208 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.969438 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.969465 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.969474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.969488 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.969500 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:39Z","lastTransitionTime":"2025-10-11T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.992941 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:39 crc kubenswrapper[5055]: I1011 06:54:39.993026 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:39 crc kubenswrapper[5055]: E1011 06:54:39.993117 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:39 crc kubenswrapper[5055]: E1011 06:54:39.993283 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.071491 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.071540 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.071552 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.071570 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.071583 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.174109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.174148 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.174158 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.174176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.174188 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.276229 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.276277 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.276292 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.276309 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.276324 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.379002 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.379044 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.379052 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.379070 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.379082 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.480988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.481076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.481098 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.481128 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.481149 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.583153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.583198 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.583211 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.583229 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.583242 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.685867 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.685922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.685930 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.685946 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.685983 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.788774 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.788817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.788826 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.788841 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.788850 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.891199 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.891249 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.891265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.891288 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.891303 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.992499 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.992528 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:40 crc kubenswrapper[5055]: E1011 06:54:40.992793 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:40 crc kubenswrapper[5055]: E1011 06:54:40.993117 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.994312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.994354 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.994367 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.994387 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:40 crc kubenswrapper[5055]: I1011 06:54:40.994402 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:40Z","lastTransitionTime":"2025-10-11T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.096629 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.096669 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.096680 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.096696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.096707 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.198798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.198836 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.198848 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.198865 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.198876 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.301286 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.301323 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.301331 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.301344 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.301352 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.403927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.403963 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.403972 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.403988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.403997 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.506151 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.506184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.506193 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.506206 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.506216 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.608571 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.608612 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.608621 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.608637 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.608648 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.710107 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.710141 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.710150 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.710162 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.710170 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.812513 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.812568 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.812581 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.812600 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.812612 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.915022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.915061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.915070 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.915085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.915094 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:41Z","lastTransitionTime":"2025-10-11T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.993173 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:41 crc kubenswrapper[5055]: I1011 06:54:41.993177 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:41 crc kubenswrapper[5055]: E1011 06:54:41.993301 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:41 crc kubenswrapper[5055]: E1011 06:54:41.993410 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.017478 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.017524 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.017534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.017550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.017563 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120143 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120168 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120178 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120143 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120153 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120168 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.120178 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.222428 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.222486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.222498 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.222515 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.222525 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.325214 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.325266 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.325277 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.325293 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.325321 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.428175 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.428215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.428226 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.428242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.428253 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.530907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.530974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.530987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.531006 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.531019 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.633907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.633956 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.633973 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.633999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.634016 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.682480 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:42 crc kubenswrapper[5055]: E1011 06:54:42.682630 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 11 06:54:42 crc kubenswrapper[5055]: E1011 06:54:42.682682 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:55:14.682662355 +0000 UTC m=+98.456936162 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered
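One detail worth noting in the nestedpendingoperations entry above: the failed mount is parked for 32s (until 06:55:14), which is consistent with an exponential backoff that starts at 500ms and doubles on each consecutive failure, capped at roughly two minutes; 32s = 500ms * 2^6 would be the seventh consecutive failure. A minimal Go sketch of that cadence (illustrative names, not kubelet's actual types):

package main

import (
	"errors"
	"fmt"
	"time"
)

// Illustrative constants: 500ms doubling up to a ~2min cap.
const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
)

// backoff remembers the last failure and how long to wait before retrying.
type backoff struct {
	lastError           error
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
}

// update doubles the wait on every consecutive failure, up to the cap.
func (b *backoff) update(err error) {
	if b.durationBeforeRetry == 0 {
		b.durationBeforeRetry = initialDurationBeforeRetry
	} else if b.durationBeforeRetry < maxDurationBeforeRetry {
		b.durationBeforeRetry *= 2
		if b.durationBeforeRetry > maxDurationBeforeRetry {
			b.durationBeforeRetry = maxDurationBeforeRetry
		}
	}
	b.lastError = err
	b.lastErrorTime = time.Now()
}

// safeToRetry reports whether the backoff window has elapsed.
func (b *backoff) safeToRetry(now time.Time) bool {
	return now.After(b.lastErrorTime.Add(b.durationBeforeRetry))
}

func main() {
	var b backoff
	for i := 0; i < 7; i++ {
		b.update(errors.New(`object "openshift-multus"/"metrics-daemon-secret" not registered`))
	}
	// 500ms * 2^6 = 32s: the seventh consecutive failure matches the
	// "durationBeforeRetry 32s" entry in the log above.
	fmt.Println(b.durationBeforeRetry, b.safeToRetry(time.Now()))
}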
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.736654 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.736704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.736715 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.736731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.736742 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.839239 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.839278 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.839289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.839304 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.839313 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.942200 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.942267 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.942290 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.942322 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.942344 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:42Z","lastTransitionTime":"2025-10-11T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.993296 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:54:42 crc kubenswrapper[5055]: I1011 06:54:42.993370 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:54:42 crc kubenswrapper[5055]: E1011 06:54:42.993467 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:54:42 crc kubenswrapper[5055]: E1011 06:54:42.993719 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.044968 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.045014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.045026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.045043 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.045055 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.147724 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.147788 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.147800 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.147818 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.147829 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.250154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.250207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.250219 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.250237 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.250248 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.352380 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.352421 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.352432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.352448 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.352458 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.455839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.455878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.455889 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.455908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.455918 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.512443 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.512494 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.512505 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.512523 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.512538 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.524403 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:43Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.528230 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.528259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.528271 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.528288 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.528301 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.539803 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:43Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.543812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.543871 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.543887 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.543908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.543923 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.561554 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:43Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.565603 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.565671 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.565696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.565726 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.565747 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.585336 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:43Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.590706 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.590750 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.590778 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.590798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.590810 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.610166 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:43Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.610290 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.612399 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.612427 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.612436 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.612463 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.612475 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.715466 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.715508 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.715516 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.715531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.715542 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.818231 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.818274 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.818287 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.818303 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.818315 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.920810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.920848 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.920859 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.920876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.920888 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:43Z","lastTransitionTime":"2025-10-11T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.992969 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.993287 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:43 crc kubenswrapper[5055]: I1011 06:54:43.993640 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:43 crc kubenswrapper[5055]: E1011 06:54:43.993753 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.023302 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.023350 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.023365 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.023387 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.023402 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.126179 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.126221 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.126233 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.126252 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.126265 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.228704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.228746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.228781 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.228799 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.228813 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.331109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.331147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.331156 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.331171 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.331181 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.433670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.434011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.434023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.434039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.434048 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.535780 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.535819 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.535829 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.535845 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.535856 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.637708 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.637742 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.637752 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.637788 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.637801 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.740156 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.740197 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.740206 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.740222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.740231 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.842951 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.842997 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.843009 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.843026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.843041 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.945358 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.945424 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.945442 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.945466 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.945484 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:44Z","lastTransitionTime":"2025-10-11T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.993175 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:44 crc kubenswrapper[5055]: I1011 06:54:44.993188 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:44 crc kubenswrapper[5055]: E1011 06:54:44.993353 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:44 crc kubenswrapper[5055]: E1011 06:54:44.993434 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.047644 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.047682 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.047690 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.047704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.047717 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.150186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.150233 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.150246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.150265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.150279 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.253088 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.253127 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.253135 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.253149 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.253159 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.354974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.355015 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.355024 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.355037 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.355046 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.356434 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/0.log" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.356469 5055 generic.go:334] "Generic (PLEG): container finished" podID="c2f344f5-5570-4fb6-b59d-5b881cd1d2cc" containerID="4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c" exitCode=1 Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.356493 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerDied","Data":"4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.356823 5055 scope.go:117] "RemoveContainer" containerID="4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.375111 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"star
tedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a
5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.390408 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.401003 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.417349 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.431836 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.443183 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.456701 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.457053 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.457098 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.457109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.457128 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.457140 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.471821 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.484488 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.497138 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.511035 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.520701 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.528642 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.540012 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.550207 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.561968 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.562493 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.562526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.562541 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.562558 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 
06:54:45.562570 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.578213 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ov
nkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook 
\\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mou
ntPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.589393 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:45Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.664415 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.664464 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.664479 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.664500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.664512 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.766944 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.766977 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.766988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.767004 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.767016 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.869085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.869122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.869132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.869149 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.869158 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.971047 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.971083 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.971090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.971106 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.971115 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:45Z","lastTransitionTime":"2025-10-11T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.992492 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:45 crc kubenswrapper[5055]: I1011 06:54:45.992528 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:45 crc kubenswrapper[5055]: E1011 06:54:45.992609 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:45 crc kubenswrapper[5055]: E1011 06:54:45.992682 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.073741 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.073835 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.073849 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.073869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.073882 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.177087 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.177167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.177190 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.177224 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.177245 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.279807 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.279871 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.279889 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.279914 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.279932 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.362303 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/0.log"
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.362374 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerStarted","Data":"649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849"}
Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.381143 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.384655 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.384696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.384707 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.384722 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.384733 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.391476 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.404489 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.414843 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.429121 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.439807 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.449235 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.461961 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.472017 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.482670 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.487960 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.488011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.488026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.488043 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.488055 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.495528 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.504408 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.520355 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.530530 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.540163 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.557216 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.565558 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.574026 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:46Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.590074 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.590315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.590405 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.590492 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.590555 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.693667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.693710 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.693726 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.693746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.693791 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.796808 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.796880 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.796897 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.796920 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.796938 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.899358 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.899626 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.899757 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.899855 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.899931 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:46Z","lastTransitionTime":"2025-10-11T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.993309 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:46 crc kubenswrapper[5055]: I1011 06:54:46.993326 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:46 crc kubenswrapper[5055]: E1011 06:54:46.993434 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:46 crc kubenswrapper[5055]: E1011 06:54:46.993630 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.002953 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.002983 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.002992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.003006 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.003016 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.006871 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.018001 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.028081 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.044256 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.054224 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.068717 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.085718 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.094020 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.103939 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.104947 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.105034 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.105207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.105299 5055 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.105395 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.116401 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.126404 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.136303 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.146132 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.157325 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.167092 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.178280 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.190216 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.200958 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:47Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.207754 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.208029 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.208248 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.208453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.208742 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.311734 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.311782 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.311791 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.311803 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.311811 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.414534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.414584 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.414597 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.414615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.414625 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.516848 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.516894 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.516909 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.516926 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.516938 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.619403 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.619444 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.619453 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.619469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.619478 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.722308 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.722350 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.722359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.722376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.722385 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.825263 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.825292 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.825301 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.825313 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.825324 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.927293 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.927325 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.927334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.927352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.927361 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:47Z","lastTransitionTime":"2025-10-11T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.993547 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:47 crc kubenswrapper[5055]: E1011 06:54:47.993683 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:47 crc kubenswrapper[5055]: I1011 06:54:47.993548 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:47 crc kubenswrapper[5055]: E1011 06:54:47.993929 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.030117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.030170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.030187 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.030211 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.030228 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.132730 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.132790 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.132800 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.132814 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.132825 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.235104 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.235154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.235180 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.235195 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.235203 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.337950 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.338002 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.338014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.338031 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.338043 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.440693 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.440753 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.440806 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.440833 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.440857 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.542743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.542804 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.542817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.542831 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.542841 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.645469 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.645512 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.645529 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.645552 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.645567 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.747836 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.747881 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.747892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.747907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.747921 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.850226 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.850261 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.850272 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.850289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.850302 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.952593 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.952635 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.952646 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.952662 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.952675 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:48Z","lastTransitionTime":"2025-10-11T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.992852 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:48 crc kubenswrapper[5055]: E1011 06:54:48.993003 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:48 crc kubenswrapper[5055]: I1011 06:54:48.992858 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:48 crc kubenswrapper[5055]: E1011 06:54:48.993267 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.055086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.055126 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.055134 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.055150 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.055160 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.157659 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.157700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.157713 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.157729 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.157744 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.260811 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.260852 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.260861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.260877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.260887 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.363666 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.363719 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.363728 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.363743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.363759 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.466797 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.466825 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.466833 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.466845 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.466853 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.568869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.568926 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.568943 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.568966 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.568982 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.671819 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.671887 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.671910 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.671940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.671963 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.775244 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.775291 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.775307 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.775329 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.775349 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.879399 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.879448 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.879467 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.879492 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.879509 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.983374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.983416 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.983431 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.983451 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.983466 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:49Z","lastTransitionTime":"2025-10-11T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.992383 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:49 crc kubenswrapper[5055]: E1011 06:54:49.992516 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:49 crc kubenswrapper[5055]: I1011 06:54:49.992402 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:49 crc kubenswrapper[5055]: E1011 06:54:49.992940 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.086820 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.086876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.086893 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.086916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.086932 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.190145 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.190200 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.190215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.190234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.190245 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.293740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.293824 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.293841 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.293869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.293886 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.396654 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.396700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.396711 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.396734 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.396745 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.499878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.499947 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.499968 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.499997 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.500020 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.602940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.602986 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.603000 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.603019 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.603032 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.706728 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.706808 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.706822 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.706842 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.706856 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.810185 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.810243 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.810260 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.810284 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.810302 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.912604 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.912639 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.912649 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.912664 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.912674 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:50Z","lastTransitionTime":"2025-10-11T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.993090 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.993337 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:50 crc kubenswrapper[5055]: E1011 06:54:50.993475 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:50 crc kubenswrapper[5055]: E1011 06:54:50.994039 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:50 crc kubenswrapper[5055]: I1011 06:54:50.994614 5055 scope.go:117] "RemoveContainer" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.004882 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.015184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.015220 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.015230 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.015245 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.015255 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.122784 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.122828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.122843 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.122865 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.122878 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.225423 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.225458 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.225466 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.225479 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.225488 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.328526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.328573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.328588 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.328607 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.328620 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.431022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.431054 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.431066 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.431082 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.431092 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.533113 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.533163 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.533174 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.533191 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.533202 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.636192 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.636246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.636259 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.636279 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.636294 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.738946 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.738990 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.739001 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.739020 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.739030 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.841092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.841115 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.841122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.841135 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.841143 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.943838 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.943869 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.943878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.943891 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.943901 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:51Z","lastTransitionTime":"2025-10-11T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.992835 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:51 crc kubenswrapper[5055]: I1011 06:54:51.992899 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:51 crc kubenswrapper[5055]: E1011 06:54:51.992969 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:54:51 crc kubenswrapper[5055]: E1011 06:54:51.993026 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.046735 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.046798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.046822 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.046842 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.046852 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.149538 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.149573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.149585 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.149619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.149631 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.251480 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.251531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.251542 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.251561 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.251575 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.354487 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.354521 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.354530 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.354544 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.354554 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.380719 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/3.log"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.381238 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/2.log"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.383546 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" exitCode=1
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.383590 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"}
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.383632 5055 scope.go:117] "RemoveContainer" containerID="457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.384538 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"
Oct 11 06:54:52 crc kubenswrapper[5055]: E1011 06:54:52.384735 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"
Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.402223 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 
06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.412021 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.424477 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.435117 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.445990 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.457752 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.457828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.457838 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.457852 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 
06:54:52.457862 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.467394 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ov
nkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook 
\\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:52Z\\\",\\\"message\\\":\\\"ions:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237426 7070 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver/apiserver]} name:Service_openshift-kube-apiserver/apiserver_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237609 7070 ovnkube.go:599] Stopped ovnkube\\\\nI1011 06:54:52.237650 7070 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1011 06:54:52.237712 7070 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.485413 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\
",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.494726 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.507658 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.517846 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.528972 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44e4649a-880c-4aec-a413-7ac0f95fcec4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.542506 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.553216 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.560179 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.560207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.560215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.560228 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.560237 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.564970 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.575717 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.587383 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.599893 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.609167 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.626757 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:52Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.662334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.662401 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc 
kubenswrapper[5055]: I1011 06:54:52.662411 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.662429 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.662441 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.764731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.764847 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.764870 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.764903 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.764924 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.867744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.867806 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.867817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.867834 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.867845 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.970861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.970938 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.971027 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.971059 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.971080 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:52Z","lastTransitionTime":"2025-10-11T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.993491 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:52 crc kubenswrapper[5055]: I1011 06:54:52.993530 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:52 crc kubenswrapper[5055]: E1011 06:54:52.993699 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:52 crc kubenswrapper[5055]: E1011 06:54:52.993895 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.073520 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.073683 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.073703 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.073733 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.073753 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.176276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.176333 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.176349 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.176375 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.176393 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.279694 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.279738 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.279751 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.279788 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.279800 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.382986 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.383051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.383073 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.383102 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.383123 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.388618 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/3.log" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.486069 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.486109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.486117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.486131 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.486140 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.589147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.589223 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.589246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.589279 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.589302 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.692438 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.692489 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.692504 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.692527 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.692543 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.704258 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.704337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.704360 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.704387 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.704408 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.722472 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:53Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.726345 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.726374 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.726381 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.726394 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.726403 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.741133 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:53Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.744715 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.745061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.745195 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.745313 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.745436 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.758803 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:53Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.762950 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.763093 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.763210 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.763320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.763409 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.776251 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:53Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.779324 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.779558 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.779703 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.779876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.780197 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.793809 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:53Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.794037 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.795909 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.795931 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.795939 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.795951 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.795959 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.898746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.898877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.898900 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.898927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.898947 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:53Z","lastTransitionTime":"2025-10-11T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.993228 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:53 crc kubenswrapper[5055]: I1011 06:54:53.993340 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.993405 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:53 crc kubenswrapper[5055]: E1011 06:54:53.993632 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.001703 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.001820 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.001840 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.001892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.001913 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.104395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.104428 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.104436 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.104448 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.104459 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.207645 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.207805 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.207826 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.207849 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.207864 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.310700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.310743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.310752 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.310783 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.310796 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.414222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.414336 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.414362 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.414392 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.414414 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.517175 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.517222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.517232 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.517246 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.517254 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.619951 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.620038 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.620062 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.620091 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.620108 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.723142 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.723196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.723212 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.723233 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.723248 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.826318 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.826359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.826368 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.826389 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.826399 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.929240 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.929281 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.929292 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.929312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.929324 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:54Z","lastTransitionTime":"2025-10-11T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.993300 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:54 crc kubenswrapper[5055]: I1011 06:54:54.993395 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:54 crc kubenswrapper[5055]: E1011 06:54:54.993513 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:54 crc kubenswrapper[5055]: E1011 06:54:54.993706 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.031620 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.031668 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.031681 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.031700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.031712 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.134629 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.134670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.134678 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.134690 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.134698 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.237397 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.237448 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.237460 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.237483 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.237498 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.340288 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.340345 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.340356 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.340377 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.340390 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.443276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.443455 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.443477 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.443500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.443517 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.546982 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.547022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.547031 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.547048 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.547066 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.649831 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.649897 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.649914 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.649939 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.649957 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.752419 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.752471 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.752485 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.752508 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.752524 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.856397 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.856516 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.856536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.856605 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.856628 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.959676 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.959738 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.959755 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.959812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.959835 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:55Z","lastTransitionTime":"2025-10-11T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.993454 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:55 crc kubenswrapper[5055]: I1011 06:54:55.993561 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:55 crc kubenswrapper[5055]: E1011 06:54:55.993672 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:55 crc kubenswrapper[5055]: E1011 06:54:55.993869 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.062426 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.062455 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.062463 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.062476 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.062484 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.165236 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.165301 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.165320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.165344 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.165363 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.267987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.268023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.268031 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.268046 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.268056 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.371201 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.371248 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.371260 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.371276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.371289 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.473889 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.473921 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.473929 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.473942 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.473952 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.576395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.576458 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.576468 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.576485 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.576496 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.679027 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.679069 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.679081 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.679098 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.679110 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.781200 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.781234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.781242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.781256 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.781264 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.883742 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.883803 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.883812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.883825 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.883836 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.986311 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.986352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.986363 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.986383 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.986397 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:56Z","lastTransitionTime":"2025-10-11T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.992636 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:56 crc kubenswrapper[5055]: I1011 06:54:56.992662 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:54:56 crc kubenswrapper[5055]: E1011 06:54:56.992745 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:56 crc kubenswrapper[5055]: E1011 06:54:56.992906 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.004217 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44e4649a-880c-4aec-a413-7ac0f95fcec4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"po
dIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.016442 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.029579 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.046141 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.065539 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.075935 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.087313 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.090878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.090915 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.090929 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.090948 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.090959 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.104020 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.115441 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.126546 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.136528 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.148605 5055 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac
66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.159235 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.170636 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.189756 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://457493810bf4752ca87d2d45260627596760ecb85966d9910d41319641255d0a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:23Z\\\",\\\"message\\\":\\\"ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}, services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.245\\\\\\\", Port:9192, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1011 06:54:23.887731 6716 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:52Z\\\",\\\"message\\\":\\\"ions:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none 
reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237426 7070 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver/apiserver]} name:Service_openshift-kube-apiserver/apiserver_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237609 7070 ovnkube.go:599] Stopped ovnkube\\\\nI1011 06:54:52.237650 7070 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1011 06:54:52.237712 7070 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb857
11838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.193368 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.193404 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.193415 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.193431 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc 
kubenswrapper[5055]: I1011 06:54:57.193443 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.200067 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.209671 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.226714 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.235413 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:54:57Z is after 2025-08-24T17:21:41Z" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.295176 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.295264 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.295273 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.295286 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.295296 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.397050 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.397092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.397104 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.397120 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.397131 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.499920 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.499974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.499987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.500006 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.500024 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.602746 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.602810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.602821 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.602838 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.602849 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.705801 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.705857 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.705868 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.705884 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.705895 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.807827 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.807866 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.807875 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.807891 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.807903 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.910901 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.910940 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.910949 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.910964 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.910975 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:57Z","lastTransitionTime":"2025-10-11T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.992748 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:54:57 crc kubenswrapper[5055]: I1011 06:54:57.992827 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:54:57 crc kubenswrapper[5055]: E1011 06:54:57.992883 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:54:57 crc kubenswrapper[5055]: E1011 06:54:57.993139 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.012667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.012704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.012714 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.012731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.012745 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.115524 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.115569 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.115584 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.115604 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.115620 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.217711 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.217758 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.217788 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.217808 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.217821 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.320008 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.320050 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.320061 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.320078 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.320089 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.422945 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.423014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.423023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.423038 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.423047 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.525795 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.525846 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.525861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.525881 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.525894 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.627741 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.627792 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.627824 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.627839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.627850 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.731377 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.731429 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.731448 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.731471 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.731489 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.834873 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.834991 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.835021 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.835051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.835073 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.939013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.939090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.939110 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.939140 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.939160 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:54:58Z","lastTransitionTime":"2025-10-11T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.997654 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:54:58 crc kubenswrapper[5055]: E1011 06:54:58.997860 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:54:58 crc kubenswrapper[5055]: I1011 06:54:58.997908 5055 util.go:30] "No sandbox for pod can be found. 
Oct 11 06:54:58 crc kubenswrapper[5055]: E1011 06:54:58.998383 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
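Each "Error syncing pod, skipping" record above is the same readiness gate applied per pod: pods that need the cluster network are deferred until NetworkReady is true (host-network pods would be exempt from the check). A hedged sketch of that gate, with made-up types rather than the kubelet's pod_workers.go internals:

	package main

	import (
		"errors"
		"fmt"
	)

	// errNetworkNotReady stands in for the error the kubelet logs above.
	var errNetworkNotReady = errors.New("network is not ready: container runtime network not ready: NetworkReady=false")

	// canSyncPod is an illustrative version of the check: only host-network
	// pods may proceed while the CNI plugin is not ready.
	func canSyncPod(hostNetwork, networkReady bool) error {
		if networkReady || hostNetwork {
			return nil
		}
		return errNetworkNotReady
	}

	func main() {
		for _, pod := range []struct {
			name        string
			hostNetwork bool
		}{
			{"openshift-network-diagnostics/network-check-source-55646444c4-trplf", false},
			{"hypothetical-host-network-static-pod", true},
		} {
			if err := canSyncPod(pod.hostNetwork, false); err != nil {
				fmt.Printf("Error syncing pod, skipping: %v pod=%q\n", err, pod.name)
				continue
			}
			fmt.Printf("syncing pod %q\n", pod.name)
		}
	}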
Oct 11 06:54:59 crc kubenswrapper[5055]: I1011 06:54:59.992519 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:54:59 crc kubenswrapper[5055]: I1011 06:54:59.992668 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:54:59 crc kubenswrapper[5055]: E1011 06:54:59.992726 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:54:59 crc kubenswrapper[5055]: E1011 06:54:59.992912 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.069593 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.069626 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.069637 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.069652 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.069662 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:00Z","lastTransitionTime":"2025-10-11T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.172401 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.172445 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.172458 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.172476 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.172489 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:00Z","lastTransitionTime":"2025-10-11T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.773008 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.773124 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.773218 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.773191774 +0000 UTC m=+148.547465581 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
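The TearDown failure above means the kubevirt.io.hostpath-provisioner node plugin had not yet re-registered with the kubelet after the restart, so the CSI client lookup fails and the unmount is requeued with backoff. An illustrative sketch of such a registry lookup; the structure is hypothetical, not the kubelet's actual CSI plugin registry:

	package main

	import (
		"fmt"
		"sync"
	)

	// csiDrivers is a stand-in for the kubelet's registry of CSI node
	// plugins that have announced themselves over the plugin-registration
	// socket.
	type csiDrivers struct {
		mu      sync.RWMutex
		drivers map[string]struct{}
	}

	func (r *csiDrivers) lookup(name string) error {
		r.mu.RLock()
		defer r.mu.RUnlock()
		if _, ok := r.drivers[name]; !ok {
			return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
		}
		return nil
	}

	func main() {
		reg := &csiDrivers{drivers: map[string]struct{}{}}
		// Right after a kubelet restart the hostpath provisioner has not
		// re-registered yet, so the lookup fails and TearDown is retried.
		fmt.Println(reg.lookup("kubevirt.io.hostpath-provisioner"))
	}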
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.773223 5055 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.773269 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.773260406 +0000 UTC m=+148.547534223 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.773305 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.773411 5055 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.773442 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.773432841 +0000 UTC m=+148.547706638 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.874151 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:00 crc kubenswrapper[5055]: I1011 06:55:00.874195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874343 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874376 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874395 5055 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874448 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.874430969 +0000 UTC m=+148.648704776 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874343 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874474 5055 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874485 5055 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 11 06:55:00 crc kubenswrapper[5055]: E1011 06:55:00.874518 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.874506682 +0000 UTC m=+148.648780489 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
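The durationBeforeRetry of 1m4s in the mount and unmount operations above is per-volume exponential backoff: each failed attempt roughly doubles the delay up to a cap, and 64s is one of those doubling steps. A sketch of that policy follows; the 500ms initial delay and 2m2s cap are assumptions for illustration, not values read from this log:

	package main

	import (
		"fmt"
		"time"
	)

	// nextBackoff doubles the previous delay up to a cap, the pattern behind
	// "No retries permitted until ... (durationBeforeRetry 1m4s)" above.
	func nextBackoff(prev time.Duration) time.Duration {
		const (
			initialDelay = 500 * time.Millisecond // assumption
			maxDelay     = 2*time.Minute + 2*time.Second // assumption
		)
		if prev == 0 {
			return initialDelay
		}
		if next := 2 * prev; next < maxDelay {
			return next
		}
		return maxDelay
	}

	func main() {
		var d time.Duration
		for i := 0; i < 9; i++ {
			d = nextBackoff(d)
			fmt.Println(d) // 500ms, 1s, 2s, ... 32s, 1m4s, 2m2s (capped)
		}
	}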
Has your network provider started?"} Oct 11 06:55:02 crc kubenswrapper[5055]: I1011 06:55:02.993377 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:02 crc kubenswrapper[5055]: I1011 06:55:02.993428 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:02 crc kubenswrapper[5055]: E1011 06:55:02.993525 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:02 crc kubenswrapper[5055]: E1011 06:55:02.993674 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.056710 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.056784 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.056801 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.056821 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.056838 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.159647 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.159699 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.159727 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.159743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.159755 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.262612 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.262672 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.262691 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.262729 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.262754 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.365642 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.365685 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.365699 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.365716 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.365724 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.467847 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.467912 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.467931 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.467957 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.467978 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.570703 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.570753 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.570796 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.570817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.570832 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.672906 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.672949 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.672958 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.672972 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.672981 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.775851 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.775916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.775941 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.775971 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.775993 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.878878 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.878943 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.878978 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.879014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.879043 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.945657 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.945695 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.945704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.945718 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.945728 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: E1011 06:55:03.967227 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:03Z is after 
2025-08-24T17:21:41Z" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.972573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.972628 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.972646 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.972670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.972688 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:03Z","lastTransitionTime":"2025-10-11T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:03 crc kubenswrapper[5055]: E1011 06:55:03.991906 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:03Z is after 
2025-08-24T17:21:41Z" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.992574 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:03 crc kubenswrapper[5055]: I1011 06:55:03.992634 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:03 crc kubenswrapper[5055]: E1011 06:55:03.992706 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:03 crc kubenswrapper[5055]: E1011 06:55:03.992821 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.002224 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.002255 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.002289 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.002307 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.002318 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.020877 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:04Z is after 
2025-08-24T17:21:41Z" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.025582 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.025619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.025631 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.025648 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.025659 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.040433 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:04Z is after 
2025-08-24T17:21:41Z" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.044965 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.045004 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.045015 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.045033 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.045047 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.058093 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:04Z is after 
2025-08-24T17:21:41Z" Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.058318 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.060107 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.060143 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.060154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.060170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.060183 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.162451 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.162501 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.162516 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.162539 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.162558 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.265447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.265493 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.265508 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.265528 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.265544 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.368530 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.368560 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.368569 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.368584 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.368595 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.471400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.471461 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.471482 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.471515 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.471539 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.574219 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.574279 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.574297 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.574322 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.574341 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.676885 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.676934 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.676950 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.676970 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.676986 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.779509 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.779550 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.779565 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.779582 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.779596 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.881898 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.881955 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.881974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.881995 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.882007 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.985260 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.985315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.985330 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.985351 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.985366 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:04Z","lastTransitionTime":"2025-10-11T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.992506 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.992763 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:04 crc kubenswrapper[5055]: I1011 06:55:04.992504 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:04 crc kubenswrapper[5055]: E1011 06:55:04.993167 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.088262 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.088310 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.088320 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.088333 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.088345 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.191582 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.191622 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.191631 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.191645 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.191655 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.293410 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.293472 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.293488 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.293512 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.293526 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.396411 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.396477 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.396498 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.396527 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.396554 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.498811 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.498854 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.498865 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.498882 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.498893 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.601217 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.601258 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.601292 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.601309 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.601319 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.703091 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.703122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.703132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.703145 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.703155 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.759208 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.760423 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 06:55:05 crc kubenswrapper[5055]: E1011 06:55:05.760685 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.778757 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.792695 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.805625 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.805660 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.805670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.805684 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.805694 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.817863 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:52Z\\\",\\\"message\\\":\\\"ions:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237426 7070 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver/apiserver]} name:Service_openshift-kube-apiserver/apiserver_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237609 7070 ovnkube.go:599] Stopped ovnkube\\\\nI1011 06:54:52.237650 7070 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1011 06:54:52.237712 7070 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.827943 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 
06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.836447 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.848072 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.890918 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.907546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.907597 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.907610 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.907628 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.907639 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:05Z","lastTransitionTime":"2025-10-11T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.916750 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-1
1T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8c
a8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.927158 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.936626 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.945012 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.964717 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.973125 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.982276 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44e4649a-880c-4aec-a413-7ac0f95fcec4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.993353 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.993374 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:05 crc kubenswrapper[5055]: E1011 06:55:05.993470 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:05 crc kubenswrapper[5055]: E1011 06:55:05.993537 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:05 crc kubenswrapper[5055]: I1011 06:55:05.993838 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"m
ountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:05Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.004413 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.009690 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.009721 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.009731 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.009744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.009752 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.024654 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.034598 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:06Z is after 2025-08-24T17:21:41Z" Oct 11 
06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.045501 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:06Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.111642 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.111678 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.111687 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.111702 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.111712 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.214326 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.214359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.214368 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.214381 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.214389 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.316988 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.317054 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.317073 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.317097 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.317116 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.419961 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.420033 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.420051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.420076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.420094 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.522678 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.522725 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.522737 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.522753 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.522784 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.624935 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.624971 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.624981 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.624993 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.625004 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.727887 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.727935 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.727951 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.727973 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.727991 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.829974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.830013 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.830021 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.830036 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.830047 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.932661 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.932701 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.932735 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.932750 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.932779 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:06Z","lastTransitionTime":"2025-10-11T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.992595 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:06 crc kubenswrapper[5055]: E1011 06:55:06.992757 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:06 crc kubenswrapper[5055]: I1011 06:55:06.992847 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:06 crc kubenswrapper[5055]: E1011 06:55:06.993375 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.004251 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.019827 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.031954 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.034502 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.034534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.034546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.034562 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.034575 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.045734 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.056799 5055 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 
06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.074617 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595cb6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.086243 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.096259 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.112026 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:52Z\\\",\\\"message\\\":\\\"ions:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237426 7070 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver/apiserver]} name:Service_openshift-kube-apiserver/apiserver_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237609 7070 ovnkube.go:599] Stopped ovnkube\\\\nI1011 06:54:52.237650 7070 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1011 06:54:52.237712 7070 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.122095 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.131990 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.136407 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.136446 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.136455 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.136470 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.136479 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.148471 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.157846 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.166873 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44e4649a-880c-4aec-a413-7ac0f95fcec4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.177880 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.189433 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.199055 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.209048 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.216695 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:07Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.238196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.238223 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.238251 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.238266 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.238275 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.340536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.340568 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.340578 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.340593 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.340603 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.442440 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.442478 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.442487 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.442501 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.442511 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.546348 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.546382 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.546395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.546412 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.546424 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.648663 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.648708 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.648720 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.648736 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.648747 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.750964 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.751205 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.751309 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.751434 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.751576 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.855307 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.855986 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.856827 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.857088 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.857334 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.960264 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.960324 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.960341 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.960364 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.960382 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:07Z","lastTransitionTime":"2025-10-11T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.993061 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:07 crc kubenswrapper[5055]: I1011 06:55:07.993117 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:07 crc kubenswrapper[5055]: E1011 06:55:07.993300 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:07 crc kubenswrapper[5055]: E1011 06:55:07.993501 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.063437 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.063514 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.063536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.063568 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.063590 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.166123 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.166157 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.166167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.166183 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.166195 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.269263 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.269294 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.269302 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.269315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.269324 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.375483 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.375559 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.375577 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.375602 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.375626 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.478397 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.478447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.478461 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.478476 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.478509 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.580989 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.581030 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.581040 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.581057 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.581068 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.683119 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.683149 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.683158 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.683171 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.683179 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.785513 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.785806 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.785921 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.786014 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.786094 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.888853 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.888892 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.888902 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.888915 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.888924 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.992673 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.992790 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.992801 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.992816 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.992829 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:08Z","lastTransitionTime":"2025-10-11T06:55:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.993344 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:08 crc kubenswrapper[5055]: E1011 06:55:08.993431 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:08 crc kubenswrapper[5055]: I1011 06:55:08.993583 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:08 crc kubenswrapper[5055]: E1011 06:55:08.993638 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.095350 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.095400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.095412 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.095428 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.095440 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.198000 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.198030 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.198039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.198052 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.198061 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.300733 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.300787 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.300801 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.300817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.300826 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.403160 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.403188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.403197 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.403211 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.403222 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.505813 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.506208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.506304 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.506392 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.506469 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.609022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.609362 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.609555 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.609739 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.609991 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.712691 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.713082 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.713277 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.713526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.713757 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.816966 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.817026 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.817046 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.817072 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.817093 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.919911 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.920205 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.920300 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.920388 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.920477 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:09Z","lastTransitionTime":"2025-10-11T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.992938 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:09 crc kubenswrapper[5055]: I1011 06:55:09.993030 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:09 crc kubenswrapper[5055]: E1011 06:55:09.993080 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:09 crc kubenswrapper[5055]: E1011 06:55:09.993195 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.022727 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.023051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.023141 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.023227 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.023312 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.125324 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.125901 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.125992 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.126075 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.126149 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.228433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.228483 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.228496 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.228511 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.228519 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.330483 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.330531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.330544 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.330560 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.330572 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.432139 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.432169 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.432178 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.432191 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.432199 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.533961 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.533993 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.534004 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.534021 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.534030 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.635941 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.636191 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.636261 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.636327 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.636397 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.738383 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.738434 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.738447 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.738465 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.738476 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.840640 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.840697 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.840713 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.840736 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.840753 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.944035 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.944074 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.944086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.944103 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.944111 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:10Z","lastTransitionTime":"2025-10-11T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.993325 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:10 crc kubenswrapper[5055]: E1011 06:55:10.993467 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:10 crc kubenswrapper[5055]: I1011 06:55:10.993482 5055 util.go:30] "No sandbox for pod can be found. 
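[annotation] Every entry above reduces to one root cause: kubelet reports NetworkReady=false because nothing has yet written a CNI network configuration into /etc/kubernetes/cni/net.d/. The sketch below is illustrative Go, not kubelet source; the directory comes from the log message, and the scanned file extensions (*.conf, *.conflist, *.json) are the conventional CNI config candidates, which is an assumption here. It shows the shape of the readiness check that is failing:

// cnicheck.go - illustrative sketch (not kubelet source): report whether the
// CNI conf directory named in the log would satisfy a NetworkReady-style check.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path taken from the log message
	var candidates []string
	// Conventional CNI config extensions; assumed, not read from the log.
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, err := filepath.Glob(filepath.Join(confDir, pat))
		if err != nil { // only possible for a malformed pattern; ours are fixed
			continue
		}
		candidates = append(candidates, m...)
	}
	if len(candidates) == 0 {
		fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
		os.Exit(1)
	}
	fmt.Println("CNI config candidates:", candidates)
}

Once the network provider writes a config file into that directory the check starts passing; until then every node-status pass re-reports NotReady, which is the repetition visible in this log.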
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:10 crc kubenswrapper[5055]: E1011 06:55:10.993608 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.047845 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.048205 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.048358 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.048506 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.048882 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.153033 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.153076 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.153085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.153099 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.153108 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.254646 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.254677 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.254686 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.254698 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.254707 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.358010 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.358086 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.358109 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.358139 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.358159 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.460063 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.460105 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.460114 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.460129 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.460139 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.562468 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.562509 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.562520 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.562536 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.562545 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.664949 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.664984 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.664994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.665010 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.665022 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.767494 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.767531 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.767540 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.767555 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.767565 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.870130 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.870191 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.870209 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.870237 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.870252 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.974344 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.974403 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.974413 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.974428 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.974438 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:11Z","lastTransitionTime":"2025-10-11T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.992699 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:11 crc kubenswrapper[5055]: I1011 06:55:11.992717 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:11 crc kubenswrapper[5055]: E1011 06:55:11.993030 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
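[annotation] The same four pods (network-check-source, network-check-target, network-metrics-daemon, networking-console-plugin) fail their sync roughly once per second: kubelet requeues a pod whose sandbox cannot be created until the network plugin reports ready. A small illustrative Go filter for pulling the affected pod/UID pairs out of a log like this one; the field names pod="..." and podUID="..." are taken from the entries above, everything else is assumed scaffolding:

// podscan.go - illustrative: list unique pod/UID pairs from
// "Error syncing pod" lines in a kubelet log read on stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`"Error syncing pod, skipping".*\bpod="([^"]+)" podUID="([^"]+)"`)
	seen := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // entries in this log are very long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil && !seen[m[1]] {
			seen[m[1]] = true
			fmt.Printf("%s uid=%s\n", m[1], m[2])
		}
	}
}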
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:11 crc kubenswrapper[5055]: E1011 06:55:11.993103 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.076897 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.076957 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.076981 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.077010 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.077030 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.180269 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.180312 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.180328 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.180353 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.180369 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.283847 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.283877 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.283886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.283900 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.283908 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.387234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.387293 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.387308 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.387331 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.387346 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.490215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.490405 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.490415 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.490451 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.490463 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.592743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.592800 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.592812 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.592828 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.592839 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.696023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.696092 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.696117 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.696147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.696170 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.799870 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.799936 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.799954 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.799977 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.799997 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.902752 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.902853 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.902876 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.902904 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.902925 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.992529 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:12 crc kubenswrapper[5055]: E1011 06:55:12.992803 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:12 crc kubenswrapper[5055]: I1011 06:55:12.992529 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:12 crc kubenswrapper[5055]: E1011 06:55:12.992979 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
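[annotation] The condition={...} payload on the setters.go:603 lines is plain JSON in the shape of a core/v1 NodeCondition. A self-contained sketch that decodes one of the payloads above; the struct is a hypothetical local stand-in, not the real Kubernetes type:

// cond.go - illustrative: decode the condition={...} JSON that setters.go logs.
package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors only the fields visible in this log.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied from the 06:55:12 setter entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:12Z","lastTransitionTime":"2025-10-11T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}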
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.004745 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.004791 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.004800 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.004814 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.004823 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.107058 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.107110 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.107122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.107142 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.107155 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.211608 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.211659 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.211672 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.211694 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.211709 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.315044 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.315132 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.315179 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.315208 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.315229 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.419293 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.419405 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.419432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.419461 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.419483 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.522155 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.522218 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.522234 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.522264 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.522279 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.624623 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.624657 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.624667 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.624682 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.624693 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.728151 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.728222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.728245 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.728274 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.728297 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.831359 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.831605 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.831627 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.831654 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.831674 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.939646 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.939696 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.939715 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.939740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.939760 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:13Z","lastTransitionTime":"2025-10-11T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.992479 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:13 crc kubenswrapper[5055]: I1011 06:55:13.992602 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:13 crc kubenswrapper[5055]: E1011 06:55:13.992825 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:13 crc kubenswrapper[5055]: E1011 06:55:13.993154 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.043209 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.043265 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.043287 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.043315 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.043341 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.146825 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.146900 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.146922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.146950 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.146974 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.250861 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.250945 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.250965 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.250994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.251014 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.257275 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.257367 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.257395 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.257431 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.257457 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
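[annotation] The entry below is the first materially different failure in this stretch of the log: the node-status PATCH is rejected because the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, well before the node's clock of 2025-10-11T06:55:14Z, so the TLS handshake fails and the kubelet retries. An illustrative Go probe (the address comes from the log entry; this is not OpenShift tooling) that fetches whatever certificate the endpoint presents and compares its validity window with the local clock:

// certprobe.go - illustrative: dial the webhook endpoint named in the log and
// print the presented certificate's validity window. Verification is disabled
// deliberately so that an expired certificate can still be inspected.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // endpoint taken from the failed-webhook log entry
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial %s: %v", addr, err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\nnotBefore: %s\nnotAfter:  %s\n",
		cert.Subject,
		cert.NotBefore.UTC().Format(time.RFC3339),
		cert.NotAfter.UTC().Format(time.RFC3339))
	if time.Now().UTC().After(cert.NotAfter) {
		fmt.Println("certificate has expired (matches the x509 error in the log)")
	}
}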
Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.280028 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:14Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.285761 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.285848 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.285866 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.285897 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.285919 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.307884 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:14Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.313972 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.314044 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.314070 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.314105 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.314128 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.337172 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:14Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.342608 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.342663 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.342675 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.342695 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.342709 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.362530 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:14Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.367866 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.367904 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.367916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.367934 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.367946 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.390097 5055 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-11T06:55:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"d49ae713-4c08-4a0c-a29b-99e24a2285c9\\\",\\\"systemUUID\\\":\\\"db761453-8050-423f-b90e-e93814339b53\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:14Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.390405 5055 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.393108 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
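
The retry errors elided above are byte-identical repeats of the patch failure shown in full once: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-10-11, so every status update is rejected until the kubelet gives up. A minimal Go sketch for confirming the expiry from the node itself; the address and port come from the error text, while shell access to the node and the endpoint still listening are assumptions:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"time"
    )

    func main() {
    	// Dial the webhook endpoint named in the log. InsecureSkipVerify is
    	// deliberate: the point is to inspect the certificate even though
    	// verification would fail on the expired NotAfter date.
    	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    	if err != nil {
    		fmt.Println("dial failed:", err)
    		return
    	}
    	defer conn.Close()

    	cert := conn.ConnectionState().PeerCertificates[0]
    	fmt.Println("subject:  ", cert.Subject)
    	fmt.Println("notBefore:", cert.NotBefore)
    	fmt.Println("notAfter: ", cert.NotAfter)
    	if time.Now().After(cert.NotAfter) {
    		// Matches the kubelet's "certificate has expired or is not yet valid".
    		fmt.Println("serving certificate is expired")
    	}
    }

Any TLS client would do; the only claim being checked is that NotAfter precedes the current time, which is exactly what the x509 error in the log states.
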
event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.393142 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.393151 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.393167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.393177 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.496323 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.496354 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.496362 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.496376 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.496384 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.599332 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.599388 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.599400 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.599418 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.599431 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.702122 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.702173 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.702184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.702203 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.702214 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.722687 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.722897 5055 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.722955 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs podName:c7861c5b-622e-4cce-9360-be9885299bd4 nodeName:}" failed. No retries permitted until 2025-10-11 06:56:18.722939443 +0000 UTC m=+162.497213270 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs") pod "network-metrics-daemon-glhzm" (UID: "c7861c5b-622e-4cce-9360-be9885299bd4") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.804000 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.804036 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.804048 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.804064 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.804077 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.907427 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.907464 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.907474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.907500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.907512 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:14Z","lastTransitionTime":"2025-10-11T06:55:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.993463 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:14 crc kubenswrapper[5055]: I1011 06:55:14.993638 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.993891 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:14 crc kubenswrapper[5055]: E1011 06:55:14.994075 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.010565 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.010615 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.010626 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.010644 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.010656 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.112441 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.112493 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.112511 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.112532 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.112545 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.216085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.216158 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.216184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.216215 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.216240 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.318996 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.319042 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.319051 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.319067 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.319077 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.422572 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.422650 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.422675 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.422707 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.422738 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.524936 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.524969 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.524999 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.525011 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.525019 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.628106 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.628143 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.628152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.628164 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.628173 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.731693 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.731833 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.731855 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.731885 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.731904 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.836291 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.836352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.836369 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.836394 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.836412 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.940373 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.940417 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.940429 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.940449 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.940461 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:15Z","lastTransitionTime":"2025-10-11T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.992747 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:15 crc kubenswrapper[5055]: I1011 06:55:15.992990 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:15 crc kubenswrapper[5055]: E1011 06:55:15.993140 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:15 crc kubenswrapper[5055]: E1011 06:55:15.993327 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.043121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.043186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.043204 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.043229 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.043246 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.147479 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.147560 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.147578 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.147605 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.147624 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.251135 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.251199 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.251218 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.251244 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.251262 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.355293 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.355725 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.355743 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.355798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.355817 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.459457 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.459491 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.459500 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.459513 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.459523 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.562311 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.562343 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.562352 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.562367 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.562378 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.666701 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.666755 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.666839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.666865 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.666879 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.769617 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.769755 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.769806 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.769834 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.769851 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.873844 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.873970 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.873994 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.874023 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.874083 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.977184 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.977252 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.977264 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.977287 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.977301 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:16Z","lastTransitionTime":"2025-10-11T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.993522 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:16 crc kubenswrapper[5055]: E1011 06:55:16.993652 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.994044 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:16 crc kubenswrapper[5055]: I1011 06:55:16.994381 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 06:55:16 crc kubenswrapper[5055]: E1011 06:55:16.994434 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:16 crc kubenswrapper[5055]: E1011 06:55:16.994696 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.017116 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.039390 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://02bb77b5b49c51a9132d717920dcb8a111423c94079567572e86c01ca6a15130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38514ff217aabf1b1a9b45e95a8422f5320a0d15c5d41a2c269ee9cf09cc316c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.054321 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.068320 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-rbwct" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"14178151-9e5c-49bd-941f-5178607a5ad4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c2c9c833ffa180525ab595f11acaf2350078f40505e44b71bb588d4e93929342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://347e078fbdf791b3e1738efd0d4d82465d50a84d163fbea4c1772252429e5c11\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cf57395e09125743dbd9343e6f9c9d33ea859ada7f3df53cbb74f9c0742ef811\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b6f2e574fb2ffaa5bdca94b8904d628f3e0207b4808af7d0974d3674a644522e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6230a9b781e207cd008ead3b370b38d9138ea844228805e62a02f87ecce2557\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://364006a69524ea8d2f6114001c6ebcdf11c4a111c5afdcb963c0abbd87165af1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39eb88b835553a77af281135d61832348c1e97240499093145183028330de05\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:54:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fthjw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-rbwct\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.079125 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.079162 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.079170 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.079186 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.079195 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.080791 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46789346-5a88-43a0-ad63-b530185c8ca1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://92fdbb37c990b89ab67638785d64c1e9bb8dc0b1336311324acd008889c18aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-shprt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-qtqvf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.090551 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-glhzm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c7861c5b-622e-4cce-9360-be9885299bd4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hkbc9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:10Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-glhzm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.102201 5055 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"70a5cc61-88c5-4b52-9cbb-5e676241afc0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d4d8c98c8c012eb02cdff26d10455e0b903a62550a00f4d1838ef953fa18d2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6add5c4b4202fdf0162a2fbe0d922ace8f6c864d4faf049ffec9f73fc16792c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa67e7a34a0d06bcbb47bdd8df72d53ef04f76e3c03ed0fa9b3a091bae270880\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f4cbfd7a2d313385b3a2fdd45edf595c
b6d2d8528ac66a96615cf20357a24efa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c607ccffd3bdabbe09510aa7884deb2745a8de66229ec42a07f2c9dc9b3012e7\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1011 06:53:41.685387 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1011 06:53:41.688465 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-867683540/tls.crt::/tmp/serving-cert-867683540/tls.key\\\\\\\"\\\\nI1011 06:53:56.048540 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1011 06:53:56.051443 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1011 06:53:56.051460 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1011 06:53:56.051483 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1011 06:53:56.051488 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1011 06:53:56.061893 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1011 06:53:56.061934 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061941 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1011 06:53:56.061948 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1011 06:53:56.061953 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1011 06:53:56.061956 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1011 06:53:56.061961 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1011 06:53:56.062343 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1011 06:53:56.064897 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b25fafcf8f1725109e2152ad49942760aab130d979012bb417e298838defda81\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f98b31762dec66d5c272340ec7b62ebc0050f86f6947734a72076d19436a417c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.113473 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ca39d496-263f-483d-b514-667039ec778d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a522ae40a7983ae0c2aea90694943973bf77901db147942421216358e85f69ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://115b447f80a4350afe92d1dd0a3cad4792d5608a748e4ee63b4b8e2c9a7893c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ed6f10751e8e49ec0116f177c3a508f0ceba7c89a2dd163e0ca5d8207e517ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ac8182fd08c2882e1a3b09004e69975d87a3ae7ce83c73ad0b863542c23b03d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.124788 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b726bf93-4666-4d28-8dfe-8b7b88fd82ed\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6a3e233e4de1c86f7c71778e70332885b5e9d059928dcd320c3711cfff2df75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8aa639ce86b821591da60a6ee18ce1cb4d64b671dc50dfee3fff7d975967411f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0492b1afae66a10f8f8f8decddb20d95f42a1bbb3dc3dbe92c4cbccdb71ff9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://49a9655f9398562f35615c466d10b7e3d492e50d856661cf9d2695821a2e79c4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.141851 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:52Z\\\",\\\"message\\\":\\\"ions:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237426 7070 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver/apiserver]} name:Service_openshift-kube-apiserver/apiserver_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.93:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {d71b38eb-32af-4c0f-9490-7c317c111e3a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1011 06:54:52.237609 7070 ovnkube.go:599] Stopped ovnkube\\\\nI1011 06:54:52.237650 7070 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1011 06:54:52.237712 7070 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:54:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvpvs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:57Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-5dg24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.151797 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5aeff201-f6bb-4cbb-ae1d-6a2e0aae7437\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1aac36c3a4cb2a310cd6c0ccd966d90da374f87588d0d9df7b91fbf5de6b939a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4bd484905b529eb87ec4135e1fcea86d71080d4664bb843b7ee402d90b400b93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kkqnf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:54:09Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-h2gdh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.169560 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0bbb6f9-9b91-431f-b9c6-417aeb22f8d4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://900244f3c0b37aca2264ced76444616b8c17e5eb181540174146f62f997262ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e1ce7202a38eb6511bd3276c87a0eb3947083c3bf73e07b4a6d279e3dbfb1354\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f8135527a03096e34be8fe430d3b4d028bbd41fda5a84c5a798b8e44d4026e0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d2f4cf904785ba90efb5a6e637ee78d0e8744d
0081b26cba619920e3ed46192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa544c658528b7a9f98bfc5a69a0950ea66ad3989b3b7cc0083b9fcaafc38ea9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b493da09e1cbfc6a650f705e8253ee4ab23d5284a5e5e0899bf8de683c7ca63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f742ba35ad8a9d71796dcf5c43eab9bd70730f57e8ca8a0abde8192e95b94f38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee00dbe0650e6f36205bfb7c26215234d51a73bf9992ea7f2d9d043ce62f66d5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.179980 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vmbd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99289791-dfca-4a6e-81a1-792954f034af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9159df3ec98aeedf893d09d1c4aa90d15c7707dbb31ed109de71feeca9a1522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-szpfm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\
\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vmbd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.184323 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.184379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.184408 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.184432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.184448 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.192814 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-892mt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b70778bc-7f9e-4ef8-b3d6-35ea5c9a9b26\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://70e48c71353cc19c8da8e0311463f7354e856b0bf6af5cf028273597ce616f38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwgpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:59Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-892mt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.203054 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44e4649a-880c-4aec-a413-7ac0f95fcec4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f9ef26c5a288eb84c448982919e35766d8a19bcd6e1616ad40046cbd4037d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5e3da21ed6a1e09981293dffd230ee013186f9dd41485dcd197b18a7a48eb2de\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-11T06:53:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.213736 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ba7d175b5f0e53faf9867a4665df68eb1936a0a1fbee05380473c02337769f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.224146 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.244246 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://149eda676ec501025e4ec8e1c4c9ae2d207fa14a42e1b3ebbcbd17dabfe88901\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:53:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.276536 5055 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-4lplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:53:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-11T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-11T06:54:44Z\\\",\\\"message\\\":\\\"2025-10-11T06:53:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d\\\\n2025-10-11T06:53:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5b4ce36e-4893-47a1-a64a-d6a576e65e6d to /host/opt/cni/bin/\\\\n2025-10-11T06:53:59Z [verbose] multus-daemon started\\\\n2025-10-11T06:53:59Z [verbose] Readiness Indicator file check\\\\n2025-10-11T06:54:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-11T06:53:57Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-11T06:54:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8vwh6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-11T06:53:56Z\\\"}}\" for pod \"openshift-multus\"/\"multus-4lplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-11T06:55:17Z is after 2025-08-24T17:21:41Z" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.287090 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.287305 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.287473 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.287591 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.287686 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.390022 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.390062 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.390071 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.390085 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.390094 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.492614 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.492650 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.492670 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.492685 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.492696 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.595071 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.595121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.595133 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.595150 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.595161 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.698288 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.698335 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.698347 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.698364 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.698375 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.802475 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.802518 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.802530 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.802548 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.802559 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.905163 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.905209 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.905222 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.905241 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.905256 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:17Z","lastTransitionTime":"2025-10-11T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.992532 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:17 crc kubenswrapper[5055]: I1011 06:55:17.992633 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:17 crc kubenswrapper[5055]: E1011 06:55:17.992729 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:17 crc kubenswrapper[5055]: E1011 06:55:17.993140 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.008425 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.008464 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.008474 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.008490 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.008500 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.110595 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.110649 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.110666 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.110688 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.110706 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.213511 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.213546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.213559 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.213577 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.213590 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.315382 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.315421 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.315432 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.315446 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.315456 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.418242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.418907 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.418979 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.419002 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.419015 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.521510 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.521572 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.521585 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.521601 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.521611 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.623717 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.623754 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.623783 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.623798 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.623809 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.726138 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.726179 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.726190 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.726204 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.726214 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.828700 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.828744 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.828797 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.828815 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.828830 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.932327 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.932478 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.932498 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.932523 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.932557 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:18Z","lastTransitionTime":"2025-10-11T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.996449 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:18 crc kubenswrapper[5055]: I1011 06:55:18.996479 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:18 crc kubenswrapper[5055]: E1011 06:55:18.996719 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:18 crc kubenswrapper[5055]: E1011 06:55:18.996857 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.035287 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.035340 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.035357 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.035379 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.035401 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.141452 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.141510 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.141521 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.141538 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.141552 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.244404 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.244470 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.244486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.244517 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.244535 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.348486 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.348566 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.348585 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.348612 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.348631 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.452810 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.452886 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.452909 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.452939 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.452962 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.556805 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.556870 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.556893 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.556922 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.556944 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.660288 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.660433 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.660472 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.660546 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.660564 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.763947 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.764005 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.764021 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.764045 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.764063 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.867665 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.868001 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.868077 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.868147 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.868212 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.971214 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.971322 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.971339 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.971357 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.971372 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:19Z","lastTransitionTime":"2025-10-11T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.993270 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:19 crc kubenswrapper[5055]: I1011 06:55:19.993340 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:19 crc kubenswrapper[5055]: E1011 06:55:19.993853 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:19 crc kubenswrapper[5055]: E1011 06:55:19.993974 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.073809 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.073851 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.073863 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.073880 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.073891 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.176880 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.176916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.176927 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.176944 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.176965 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.279095 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.279152 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.279174 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.279203 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.279225 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.382512 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.382578 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.382603 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.382630 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.382650 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.485658 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.485717 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.485735 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.485758 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.485808 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.589164 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.589555 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.589656 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.589818 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.590051 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.693720 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.693910 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.693934 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.693968 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.693990 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.798121 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.798188 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.798206 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.798229 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.798242 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.901107 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.901242 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.901270 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.901302 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.901366 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:20Z","lastTransitionTime":"2025-10-11T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.992983 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:20 crc kubenswrapper[5055]: I1011 06:55:20.993050 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:20 crc kubenswrapper[5055]: E1011 06:55:20.993134 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:20 crc kubenswrapper[5055]: E1011 06:55:20.993239 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.003839 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.003888 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.003905 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.003923 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.003939 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.106619 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.106666 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.106677 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.106693 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.106710 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.210437 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.210498 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.210510 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.210530 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.210542 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.313638 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.313704 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.313745 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.313834 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.313865 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.416902 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.416980 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.417004 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.417034 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.417061 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.519953 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.520025 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.520047 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.520078 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.520099 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.622740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.622890 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.622908 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.622929 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.622941 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.725846 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.725900 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.725909 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.725925 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.725934 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.829270 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.829337 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.829361 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.829394 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.829418 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.931903 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.931971 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.931986 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.932006 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.932019 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:21Z","lastTransitionTime":"2025-10-11T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.992436 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:21 crc kubenswrapper[5055]: I1011 06:55:21.992519 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:21 crc kubenswrapper[5055]: E1011 06:55:21.992589 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:21 crc kubenswrapper[5055]: E1011 06:55:21.992691 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.034682 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.034801 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.034823 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.034857 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.034879 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.138577 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.138622 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.138630 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.138648 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.138659 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.241526 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.241573 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.241582 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.241605 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.241618 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.345427 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.345507 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.345533 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.345565 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.345591 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.449216 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.449305 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.449330 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.449361 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.449382 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.552996 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.553207 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.553231 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.553258 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.553275 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.656906 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.657031 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.657057 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.657084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.657106 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.760786 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.760837 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.760852 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.760874 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.760892 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.863987 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.864039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.864049 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.864065 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.864076 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.966411 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.966471 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.966484 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.966502 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.966514 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:22Z","lastTransitionTime":"2025-10-11T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.993154 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:55:22 crc kubenswrapper[5055]: I1011 06:55:22.993210 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 11 06:55:22 crc kubenswrapper[5055]: E1011 06:55:22.993473 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 11 06:55:22 crc kubenswrapper[5055]: E1011 06:55:22.993859 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.069482 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.069534 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.069545 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.069564 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.069574 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.173029 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.173068 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.173080 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.173094 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.173104 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.276039 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.276072 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.276081 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.276096 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.276105 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.380410 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.380496 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.380524 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.380560 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.380589 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.483637 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.483698 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.483719 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.483745 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.483803 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.587570 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.587611 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.587620 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.587639 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.587648 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.690651 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.690720 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.690740 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.690817 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.690833 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.793590 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.793683 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.793699 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.793739 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.793759 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.896916 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.896974 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.896986 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.897002 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.897013 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:23Z","lastTransitionTime":"2025-10-11T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.993290 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 11 06:55:23 crc kubenswrapper[5055]: I1011 06:55:23.993296 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm" Oct 11 06:55:23 crc kubenswrapper[5055]: E1011 06:55:23.993721 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4" Oct 11 06:55:23 crc kubenswrapper[5055]: E1011 06:55:23.993875 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.000243 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.000276 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.000285 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.000300 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.000310 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.103278 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.103334 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.103343 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.103388 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.103398 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.212058 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.212123 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.212145 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.212175 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.212195 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.314669 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.314737 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.314755 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.314840 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.314869 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.418336 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.418397 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.418414 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.418438 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.418458 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.521089 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.521125 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.521154 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.521167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.521180 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.624084 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.624149 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.624167 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.624192 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.624208 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.660634 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.660692 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.660705 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.660723 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.660738 5055 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-11T06:55:24Z","lastTransitionTime":"2025-10-11T06:55:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.719796 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"]
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.720493 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.722156 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.723893 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.724092 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.724159 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.754484 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=85.754464749 podStartE2EDuration="1m25.754464749s" podCreationTimestamp="2025-10-11 06:53:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.753915093 +0000 UTC m=+108.528188900" watchObservedRunningTime="2025-10-11 06:55:24.754464749 +0000 UTC m=+108.528738556"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.769209 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-vmbd2" podStartSLOduration=88.76918528 podStartE2EDuration="1m28.76918528s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.768362506 +0000 UTC m=+108.542636323" watchObservedRunningTime="2025-10-11 06:55:24.76918528 +0000 UTC m=+108.543459117"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.791729 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-4lplf" podStartSLOduration=88.791712286 podStartE2EDuration="1m28.791712286s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.782284143 +0000 UTC m=+108.556557990" watchObservedRunningTime="2025-10-11 06:55:24.791712286 +0000 UTC m=+108.565986083"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.791926 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-892mt" podStartSLOduration=88.791923142 podStartE2EDuration="1m28.791923142s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.791605713 +0000 UTC m=+108.565879520" watchObservedRunningTime="2025-10-11 06:55:24.791923142 +0000 UTC m=+108.566196949"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.800492 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=34.800476079 podStartE2EDuration="34.800476079s" podCreationTimestamp="2025-10-11 06:54:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.800452858 +0000 UTC m=+108.574726685" watchObservedRunningTime="2025-10-11 06:55:24.800476079 +0000 UTC m=+108.574749886"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.839717 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.839810 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.839865 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.839889 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.839922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.877343 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podStartSLOduration=88.877324233 podStartE2EDuration="1m28.877324233s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.865124957 +0000 UTC m=+108.639398774" watchObservedRunningTime="2025-10-11 06:55:24.877324233 +0000 UTC m=+108.651598040"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.925427 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-rbwct" podStartSLOduration=88.925408855 podStartE2EDuration="1m28.925408855s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.925159907 +0000 UTC m=+108.699433724" watchObservedRunningTime="2025-10-11 06:55:24.925408855 +0000 UTC m=+108.699682662"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940473 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940531 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940579 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940583 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940649 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.940671 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.941543 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.941904 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.942694 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-h2gdh" podStartSLOduration=88.942679952 podStartE2EDuration="1m28.942679952s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.94225155 +0000 UTC m=+108.716525347" watchObservedRunningTime="2025-10-11 06:55:24.942679952 +0000 UTC m=+108.716953759"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.954106 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.962451 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb24ac8a-1b01-452d-b161-9d8ad61fa45f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mfm9v\" (UID: \"eb24ac8a-1b01-452d-b161-9d8ad61fa45f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.969605 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=88.969590279 podStartE2EDuration="1m28.969590279s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.969021002 +0000 UTC m=+108.743294809" watchObservedRunningTime="2025-10-11 06:55:24.969590279 +0000 UTC m=+108.743864086"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.982264 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=88.982246219 podStartE2EDuration="1m28.982246219s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.981697342 +0000 UTC m=+108.755971169" watchObservedRunningTime="2025-10-11 06:55:24.982246219 +0000 UTC m=+108.756520026"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.992642 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.9926239 podStartE2EDuration="59.9926239s" podCreationTimestamp="2025-10-11 06:54:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:24.99196109 +0000 UTC m=+108.766234897" watchObservedRunningTime="2025-10-11 06:55:24.9926239 +0000 UTC m=+108.766897707"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.993127 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:24 crc kubenswrapper[5055]: E1011 06:55:24.993222 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:24 crc kubenswrapper[5055]: I1011 06:55:24.993408 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:24 crc kubenswrapper[5055]: E1011 06:55:24.993486 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.038731 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v"
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.494417 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v" event={"ID":"eb24ac8a-1b01-452d-b161-9d8ad61fa45f","Type":"ContainerStarted","Data":"cfc9b231405f9528372a6516c97e39610ee4f80e2a4da49ced4978c187e7dc19"}
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.494473 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v" event={"ID":"eb24ac8a-1b01-452d-b161-9d8ad61fa45f","Type":"ContainerStarted","Data":"db636b8822c155a708dee7a876495751e3df4984a563e7cc8a1ed4da5e697810"}
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.506869 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mfm9v" podStartSLOduration=89.506857079 podStartE2EDuration="1m29.506857079s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:25.506249771 +0000 UTC m=+109.280523588" watchObservedRunningTime="2025-10-11 06:55:25.506857079 +0000 UTC m=+109.281130886"
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.993165 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:25 crc kubenswrapper[5055]: I1011 06:55:25.993220 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:25 crc kubenswrapper[5055]: E1011 06:55:25.993341 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:25 crc kubenswrapper[5055]: E1011 06:55:25.993491 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:26 crc kubenswrapper[5055]: I1011 06:55:26.993247 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:26 crc kubenswrapper[5055]: I1011 06:55:26.993253 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:26 crc kubenswrapper[5055]: E1011 06:55:26.995364 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:26 crc kubenswrapper[5055]: E1011 06:55:26.995501 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:27 crc kubenswrapper[5055]: I1011 06:55:27.992588 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:27 crc kubenswrapper[5055]: E1011 06:55:27.992711 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:27 crc kubenswrapper[5055]: I1011 06:55:27.992968 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:27 crc kubenswrapper[5055]: E1011 06:55:27.993044 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:28 crc kubenswrapper[5055]: I1011 06:55:28.992813 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:28 crc kubenswrapper[5055]: I1011 06:55:28.992922 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:28 crc kubenswrapper[5055]: E1011 06:55:28.993004 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:28 crc kubenswrapper[5055]: E1011 06:55:28.993118 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:29 crc kubenswrapper[5055]: I1011 06:55:29.992576 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:29 crc kubenswrapper[5055]: E1011 06:55:29.993272 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:29 crc kubenswrapper[5055]: I1011 06:55:29.993318 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:29 crc kubenswrapper[5055]: I1011 06:55:29.994992 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"
Oct 11 06:55:29 crc kubenswrapper[5055]: E1011 06:55:29.995272 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-5dg24_openshift-ovn-kubernetes(f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad)\"" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"
Oct 11 06:55:29 crc kubenswrapper[5055]: E1011 06:55:29.995561 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:30 crc kubenswrapper[5055]: I1011 06:55:30.993320 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:30 crc kubenswrapper[5055]: I1011 06:55:30.993517 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:30 crc kubenswrapper[5055]: E1011 06:55:30.994014 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:30 crc kubenswrapper[5055]: E1011 06:55:30.994083 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.519235 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/1.log"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.520123 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/0.log"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.520164 5055 generic.go:334] "Generic (PLEG): container finished" podID="c2f344f5-5570-4fb6-b59d-5b881cd1d2cc" containerID="649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849" exitCode=1
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.520196 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerDied","Data":"649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849"}
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.520229 5055 scope.go:117] "RemoveContainer" containerID="4dddc14820e7269da5d5aa8bce769460cc0ffb919a720bed2c4830282e37575c"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.520609 5055 scope.go:117] "RemoveContainer" containerID="649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849"
Oct 11 06:55:31 crc kubenswrapper[5055]: E1011 06:55:31.520785 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-4lplf_openshift-multus(c2f344f5-5570-4fb6-b59d-5b881cd1d2cc)\"" pod="openshift-multus/multus-4lplf" podUID="c2f344f5-5570-4fb6-b59d-5b881cd1d2cc"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.992475 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:31 crc kubenswrapper[5055]: I1011 06:55:31.992594 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:31 crc kubenswrapper[5055]: E1011 06:55:31.992627 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:31 crc kubenswrapper[5055]: E1011 06:55:31.992886 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:32 crc kubenswrapper[5055]: I1011 06:55:32.525053 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/1.log"
Oct 11 06:55:32 crc kubenswrapper[5055]: I1011 06:55:32.993375 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:32 crc kubenswrapper[5055]: I1011 06:55:32.993457 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:32 crc kubenswrapper[5055]: E1011 06:55:32.993563 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:32 crc kubenswrapper[5055]: E1011 06:55:32.993870 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:33 crc kubenswrapper[5055]: I1011 06:55:33.992586 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:33 crc kubenswrapper[5055]: I1011 06:55:33.992728 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:33 crc kubenswrapper[5055]: E1011 06:55:33.992824 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:33 crc kubenswrapper[5055]: E1011 06:55:33.993104 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:34 crc kubenswrapper[5055]: I1011 06:55:34.993209 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:34 crc kubenswrapper[5055]: I1011 06:55:34.993209 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:34 crc kubenswrapper[5055]: E1011 06:55:34.994629 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:34 crc kubenswrapper[5055]: E1011 06:55:34.994658 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:35 crc kubenswrapper[5055]: I1011 06:55:35.992805 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:35 crc kubenswrapper[5055]: I1011 06:55:35.992848 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:35 crc kubenswrapper[5055]: E1011 06:55:35.992973 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:35 crc kubenswrapper[5055]: E1011 06:55:35.993165 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:36 crc kubenswrapper[5055]: E1011 06:55:36.989098 5055 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Oct 11 06:55:36 crc kubenswrapper[5055]: I1011 06:55:36.992536 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:36 crc kubenswrapper[5055]: I1011 06:55:36.992625 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:36 crc kubenswrapper[5055]: E1011 06:55:36.994025 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:36 crc kubenswrapper[5055]: E1011 06:55:36.994162 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:37 crc kubenswrapper[5055]: E1011 06:55:37.069173 5055 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Oct 11 06:55:37 crc kubenswrapper[5055]: I1011 06:55:37.993273 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:37 crc kubenswrapper[5055]: E1011 06:55:37.993440 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:37 crc kubenswrapper[5055]: I1011 06:55:37.993304 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:37 crc kubenswrapper[5055]: E1011 06:55:37.993961 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:38 crc kubenswrapper[5055]: I1011 06:55:38.995440 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:38 crc kubenswrapper[5055]: I1011 06:55:38.995474 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:38 crc kubenswrapper[5055]: E1011 06:55:38.995696 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:38 crc kubenswrapper[5055]: E1011 06:55:38.995852 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:39 crc kubenswrapper[5055]: I1011 06:55:39.992904 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:39 crc kubenswrapper[5055]: I1011 06:55:39.993106 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:39 crc kubenswrapper[5055]: E1011 06:55:39.993193 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:39 crc kubenswrapper[5055]: E1011 06:55:39.993370 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:40 crc kubenswrapper[5055]: I1011 06:55:40.992590 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:40 crc kubenswrapper[5055]: E1011 06:55:40.992728 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:40 crc kubenswrapper[5055]: I1011 06:55:40.992590 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:40 crc kubenswrapper[5055]: E1011 06:55:40.992829 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:41 crc kubenswrapper[5055]: I1011 06:55:41.992545 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:41 crc kubenswrapper[5055]: E1011 06:55:41.992887 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:41 crc kubenswrapper[5055]: I1011 06:55:41.992545 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:41 crc kubenswrapper[5055]: E1011 06:55:41.992983 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:42 crc kubenswrapper[5055]: E1011 06:55:42.070526 5055 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Oct 11 06:55:42 crc kubenswrapper[5055]: I1011 06:55:42.993237 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:42 crc kubenswrapper[5055]: I1011 06:55:42.993358 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:42 crc kubenswrapper[5055]: E1011 06:55:42.993514 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:42 crc kubenswrapper[5055]: E1011 06:55:42.993963 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:42 crc kubenswrapper[5055]: I1011 06:55:42.994207 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.564055 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/3.log"
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.567181 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerStarted","Data":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"}
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.567602 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24"
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.594630 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podStartSLOduration=107.594610881 podStartE2EDuration="1m47.594610881s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:43.593322892 +0000 UTC m=+127.367596709" watchObservedRunningTime="2025-10-11 06:55:43.594610881 +0000 UTC m=+127.368884688"
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.905460 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-glhzm"]
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.905574 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:43 crc kubenswrapper[5055]: E1011 06:55:43.905676 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:43 crc kubenswrapper[5055]: I1011 06:55:43.992422 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:43 crc kubenswrapper[5055]: E1011 06:55:43.992548 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:44 crc kubenswrapper[5055]: I1011 06:55:44.993420 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:44 crc kubenswrapper[5055]: E1011 06:55:44.993553 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:44 crc kubenswrapper[5055]: I1011 06:55:44.993792 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:44 crc kubenswrapper[5055]: E1011 06:55:44.993847 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:45 crc kubenswrapper[5055]: I1011 06:55:45.993434 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:45 crc kubenswrapper[5055]: I1011 06:55:45.993437 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:45 crc kubenswrapper[5055]: E1011 06:55:45.993630 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:45 crc kubenswrapper[5055]: E1011 06:55:45.993711 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:45 crc kubenswrapper[5055]: I1011 06:55:45.993745 5055 scope.go:117] "RemoveContainer" containerID="649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849"
Oct 11 06:55:46 crc kubenswrapper[5055]: I1011 06:55:46.578149 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/1.log"
Oct 11 06:55:46 crc kubenswrapper[5055]: I1011 06:55:46.578624 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerStarted","Data":"f04261c508ee02b5a266697e678b45e465a2d37a77e6fe7b037dea3c5c7aaf65"}
Oct 11 06:55:46 crc kubenswrapper[5055]: I1011 06:55:46.992577 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:46 crc kubenswrapper[5055]: I1011 06:55:46.992610 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:46 crc kubenswrapper[5055]: E1011 06:55:46.993950 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:46 crc kubenswrapper[5055]: E1011 06:55:46.994062 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:47 crc kubenswrapper[5055]: E1011 06:55:47.071327 5055 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Oct 11 06:55:47 crc kubenswrapper[5055]: I1011 06:55:47.992615 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:47 crc kubenswrapper[5055]: E1011 06:55:47.992749 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:47 crc kubenswrapper[5055]: I1011 06:55:47.992900 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:47 crc kubenswrapper[5055]: E1011 06:55:47.993541 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:48 crc kubenswrapper[5055]: I1011 06:55:48.995100 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:48 crc kubenswrapper[5055]: E1011 06:55:48.995202 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:48 crc kubenswrapper[5055]: I1011 06:55:48.995361 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:48 crc kubenswrapper[5055]: E1011 06:55:48.995402 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:49 crc kubenswrapper[5055]: I1011 06:55:49.992967 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:49 crc kubenswrapper[5055]: I1011 06:55:49.992967 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:49 crc kubenswrapper[5055]: E1011 06:55:49.993269 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:49 crc kubenswrapper[5055]: E1011 06:55:49.993523 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:50 crc kubenswrapper[5055]: I1011 06:55:50.992713 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:50 crc kubenswrapper[5055]: I1011 06:55:50.992710 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:50 crc kubenswrapper[5055]: E1011 06:55:50.993024 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 11 06:55:50 crc kubenswrapper[5055]: E1011 06:55:50.993106 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 11 06:55:51 crc kubenswrapper[5055]: I1011 06:55:51.993458 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:51 crc kubenswrapper[5055]: E1011 06:55:51.993610 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 11 06:55:51 crc kubenswrapper[5055]: I1011 06:55:51.993759 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:51 crc kubenswrapper[5055]: E1011 06:55:51.993907 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-glhzm" podUID="c7861c5b-622e-4cce-9360-be9885299bd4"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.993032 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.993102 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.996791 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.997070 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.997070 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Oct 11 06:55:52 crc kubenswrapper[5055]: I1011 06:55:52.997155 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Oct 11 06:55:53 crc kubenswrapper[5055]: I1011 06:55:53.992646 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:55:53 crc kubenswrapper[5055]: I1011 06:55:53.992717 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:55:53 crc kubenswrapper[5055]: I1011 06:55:53.994853 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Oct 11 06:55:53 crc kubenswrapper[5055]: I1011 06:55:53.995493 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.583196 5055 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.620362 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.620804 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.624780 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.625739 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.626417 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vr25r"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.626832 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.627432 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.627816 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.629508 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.629835 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.630126 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.630432 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.634074 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.634248 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4jldr"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.634676 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4jldr"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.636492 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.638904 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bdszt"]
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.639548 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.641570 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.641595 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.641720 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.641979 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.645696 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.645827 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.645960 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.646002 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.646454 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.646720 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.647023 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.647167 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.647269 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.647393 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.647588 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Oct 
11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.651929 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-t28vk"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.652338 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.652624 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.653618 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.654087 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.654540 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.654829 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.652333 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.655341 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.655488 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.655552 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.655611 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.655042 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.656158 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.656354 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.656506 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.656940 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.657213 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.657709 5055 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-api"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.657743 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.676560 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.676802 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.677502 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.678411 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.678819 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.679046 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.680237 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.680805 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.681297 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.681419 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.681541 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.681619 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.682183 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.682515 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.682816 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.682980 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.683203 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Oct 11 06:55:55 crc 
kubenswrapper[5055]: I1011 06:55:55.686108 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.691965 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694294 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694444 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694556 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694664 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694824 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694926 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.694929 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.695154 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.696961 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.697421 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.697538 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.698000 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.698031 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.698517 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.698727 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.703965 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.704916 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d5vkx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.705293 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-56x9d"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.705566 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.706265 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.720508 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-rdv7m"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.721058 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.721247 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.721475 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.721785 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.735123 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.735550 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.736024 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.740855 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.741472 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.741563 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742158 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742334 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742367 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742370 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742535 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742868 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742982 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743085 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.742987 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743226 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743244 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743286 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743399 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743452 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743550 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743591 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743714 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743830 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 
06:55:55.743932 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744021 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744108 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744232 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744269 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744425 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744480 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744565 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744625 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744787 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744818 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744900 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744234 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744434 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.745010 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.744574 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.745075 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.743245 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.745219 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.747689 5055 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.748228 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-85c46"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.748663 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-bxbhq"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.749063 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.749313 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.749462 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.749499 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.749933 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.751809 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.752430 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.752836 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.752983 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.753973 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.755902 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.756515 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.753002 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.758081 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.758791 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.760079 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.765973 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.766583 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.767340 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bx7lp"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.767798 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.771589 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.771894 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.773059 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.773253 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.773804 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.774382 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.774718 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776480 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776883 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776909 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-serving-cert\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776926 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit-dir\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776943 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776960 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-client\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776968 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776976 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.776995 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777009 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777032 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777048 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777063 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6g7l\" (UniqueName: \"kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777081 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-machine-approver-tls\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777097 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z55pq\" (UniqueName: 
\"kubernetes.io/projected/0013fda0-1d6c-42ea-991b-4560a7c988a0-kube-api-access-z55pq\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777111 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777128 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777143 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-serving-cert\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777158 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-dir\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777176 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777207 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777231 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777364 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-serving-ca\") pod 
\"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777391 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777409 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777431 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777462 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-client\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777485 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sncg\" (UniqueName: \"kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777507 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777529 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777650 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: 
\"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777704 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777741 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfwpb\" (UniqueName: \"kubernetes.io/projected/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-kube-api-access-kfwpb\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777808 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbqfj\" (UniqueName: \"kubernetes.io/projected/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-kube-api-access-cbqfj\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777836 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777859 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-node-pullsecrets\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777877 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777896 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-serving-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777914 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmvbg\" (UniqueName: 
\"kubernetes.io/projected/c8de3d54-de40-4186-be10-f0a394a18830-kube-api-access-gmvbg\") pod \"downloads-7954f5f757-rdv7m\" (UID: \"c8de3d54-de40-4186-be10-f0a394a18830\") " pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777940 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777961 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.777976 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778008 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7x8v\" (UniqueName: \"kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778029 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778046 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-auth-proxy-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778089 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778103 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778124 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-image-import-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778144 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-encryption-config\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778166 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-encryption-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778182 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl54b\" (UniqueName: \"kubernetes.io/projected/7a7df9e2-1bec-43f6-9459-71b36b81372f-kube-api-access-tl54b\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.778201 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-policies\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.779645 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.781645 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.781749 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.782314 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.783302 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.784060 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.784885 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.785633 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.785800 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.785847 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.788180 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.789461 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4jldr"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.795945 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.799535 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vr25r"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.800956 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.802171 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.806578 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-v6lr4"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.807281 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.809004 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.809861 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d5vkx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.811515 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.814796 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.817686 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.820332 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.823234 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-56x9d"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.823944 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.824352 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.826347 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.828006 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.829388 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bdszt"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.832909 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.835360 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.837302 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.838599 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.840193 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.841911 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.843038 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-t28vk"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.843094 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.845052 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rdv7m"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.846692 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.848297 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bx7lp"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.850746 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.852454 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.854379 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.857050 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.858838 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-rdsf5"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.859500 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.860826 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-85c46"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.861894 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-b9tgz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.862094 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.862849 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.863022 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.865335 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.866382 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-b9tgz"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.868277 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.869493 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.871666 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rdsf5"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.873795 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ft9nj"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.874806 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.875126 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ft9nj"] Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.878902 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.878938 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-client\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.878961 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.878990 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879013 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-t28vk\" (UID: 
\"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879056 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879082 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879106 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6g7l\" (UniqueName: \"kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879127 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-machine-approver-tls\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879150 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z55pq\" (UniqueName: \"kubernetes.io/projected/0013fda0-1d6c-42ea-991b-4560a7c988a0-kube-api-access-z55pq\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879258 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879285 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879305 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-serving-cert\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 
06:55:55.879326 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-dir\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879345 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879366 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879388 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879414 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879443 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879464 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879484 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879505 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-client\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: 
\"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879529 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sncg\" (UniqueName: \"kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879553 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879577 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879600 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879640 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879665 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfwpb\" (UniqueName: \"kubernetes.io/projected/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-kube-api-access-kfwpb\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879698 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbqfj\" (UniqueName: \"kubernetes.io/projected/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-kube-api-access-cbqfj\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879723 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: 
\"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879745 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-node-pullsecrets\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879783 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879806 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-serving-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879832 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmvbg\" (UniqueName: \"kubernetes.io/projected/c8de3d54-de40-4186-be10-f0a394a18830-kube-api-access-gmvbg\") pod \"downloads-7954f5f757-rdv7m\" (UID: \"c8de3d54-de40-4186-be10-f0a394a18830\") " pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879855 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879878 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879898 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879920 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7x8v\" (UniqueName: \"kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879943 5055 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879963 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-auth-proxy-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.879995 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880014 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880030 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-image-import-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880045 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-encryption-config\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880062 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-encryption-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880077 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl54b\" (UniqueName: \"kubernetes.io/projected/7a7df9e2-1bec-43f6-9459-71b36b81372f-kube-api-access-tl54b\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880093 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-policies\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880100 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880109 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880123 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-serving-cert\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880139 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit-dir\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880192 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit-dir\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.880698 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-serving-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.881779 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.881873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.881927 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-trusted-ca-bundle\") pod 
\"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.882455 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.882486 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-audit\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.882786 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.883252 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.883587 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.883822 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.884014 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-dir\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.884377 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.884446 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/7a7df9e2-1bec-43f6-9459-71b36b81372f-node-pullsecrets\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.884673 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.884991 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.885137 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.885156 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.885512 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.885516 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.886121 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.886378 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.886907 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-machine-approver-tls\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.887227 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-auth-proxy-config\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.887332 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.887412 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.888261 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.888810 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.888830 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.889087 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.889175 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-audit-policies\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.890307 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-encryption-config\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.890404 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.890566 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.890930 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.891337 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-image-import-ca\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.891507 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-serving-cert\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.891965 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0013fda0-1d6c-42ea-991b-4560a7c988a0-etcd-client\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.898130 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-encryption-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.898198 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-etcd-client\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.898397 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7a7df9e2-1bec-43f6-9459-71b36b81372f-config\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.898538 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a7df9e2-1bec-43f6-9459-71b36b81372f-serving-cert\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.903264 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.943315 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.953168 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.963643 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Oct 11 06:55:55 crc kubenswrapper[5055]: I1011 06:55:55.985680 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.005000 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.023791 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.043181 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.062997 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.082753 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.103300 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.123649 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.142913 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.163828 5055 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.182849 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.203587 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.223553 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.243082 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.263537 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.304495 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.323358 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.343605 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.364100 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.383596 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.403322 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.422715 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.443522 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.463283 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.483128 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.503178 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.523537 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.542629 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Oct 11 
06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.563126 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.583357 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.603076 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.622796 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.643753 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.663697 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.688482 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.703239 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.724173 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.743859 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.762171 5055 request.go:700] Waited for 1.004380929s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/persistentvolumes/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.783221 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789619 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789669 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789697 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789716 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-service-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789732 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80947be8-e00a-4386-b49a-a42885cc132d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789758 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f1192a-51c9-4939-9431-f39e329d25b8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789815 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf552\" (UniqueName: \"kubernetes.io/projected/69f1192a-51c9-4939-9431-f39e329d25b8-kube-api-access-qf552\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789840 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53268028-cc7b-4417-bada-ac723f7e2527-serving-cert\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789864 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/55f7075b-3a94-416c-830d-dab8afe9e6e5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789889 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9grj\" (UniqueName: \"kubernetes.io/projected/51689c82-1eb8-4080-ab6c-44759c1c5b1a-kube-api-access-b9grj\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789912 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqqq7\" (UniqueName: \"kubernetes.io/projected/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-kube-api-access-bqqq7\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789933 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789948 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51689c82-1eb8-4080-ab6c-44759c1c5b1a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.789964 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-serving-cert\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790022 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790068 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790093 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-images\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790115 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw2cn\" (UniqueName: \"kubernetes.io/projected/ffc5a842-e6af-43b9-b41e-f5334b5bac14-kube-api-access-gw2cn\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790152 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/80947be8-e00a-4386-b49a-a42885cc132d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790202 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-serving-cert\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790353 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790381 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kftfn\" (UniqueName: \"kubernetes.io/projected/55f7075b-3a94-416c-830d-dab8afe9e6e5-kube-api-access-kftfn\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790400 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xslsb\" (UniqueName: \"kubernetes.io/projected/c2ee355c-7c34-4626-aac3-99d9db842b7e-kube-api-access-xslsb\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790437 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-config\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790546 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-trusted-ca\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790575 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790643 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69f1192a-51c9-4939-9431-f39e329d25b8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790670 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790686 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c2ee355c-7c34-4626-aac3-99d9db842b7e-metrics-tls\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790711 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45f55\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790739 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-service-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790756 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-config\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790787 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-client\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790821 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-config\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790837 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n942m\" (UniqueName: 
\"kubernetes.io/projected/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-kube-api-access-n942m\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790890 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-config\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55f7075b-3a94-416c-830d-dab8afe9e6e5-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790945 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcmbh\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-kube-api-access-hcmbh\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790960 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790981 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.790997 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzt64\" (UniqueName: \"kubernetes.io/projected/53268028-cc7b-4417-bada-ac723f7e2527-kube-api-access-wzt64\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: E1011 06:55:56.791347 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.291337213 +0000 UTC m=+141.065611020 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.803434 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.824487 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.843368 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.862300 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.883157 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892117 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:56 crc kubenswrapper[5055]: E1011 06:55:56.892269 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.392252475 +0000 UTC m=+141.166526282 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892346 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/511cd957-1ec3-4efb-9700-bd83fb8a2999-metrics-tls\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892376 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kft9\" (UniqueName: \"kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892396 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3a6c6b7a-2e81-450b-9ee6-03393169141f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892411 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-certs\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892461 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4p8m\" (UniqueName: \"kubernetes.io/projected/6adf2dd2-e065-4164-9097-49bbfbf6c587-kube-api-access-z4p8m\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892477 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e386a754-06bb-476a-9e52-5fd38c93edd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892494 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892540 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892570 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/511cd957-1ec3-4efb-9700-bd83fb8a2999-trusted-ca\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892590 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgvsj\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-kube-api-access-cgvsj\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892612 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45f55\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892634 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-client\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892655 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-config\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892677 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-plugins-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892724 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n942m\" (UniqueName: \"kubernetes.io/projected/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-kube-api-access-n942m\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892746 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892787 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-config\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892809 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892829 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-metrics-tls\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892860 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55f7075b-3a94-416c-830d-dab8afe9e6e5-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892881 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892925 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-cabundle\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892946 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6295\" (UniqueName: \"kubernetes.io/projected/c2809714-7dec-4c09-a838-6537ff52d750-kube-api-access-t6295\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892968 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sjkh\" (UniqueName: \"kubernetes.io/projected/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-kube-api-access-7sjkh\") 
pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.892989 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893010 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6adf2dd2-e065-4164-9097-49bbfbf6c587-proxy-tls\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893040 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-key\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893082 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893106 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh8x4\" (UniqueName: \"kubernetes.io/projected/f480d3b1-963f-4deb-8324-01262b8e78ca-kube-api-access-qh8x4\") pod \"migrator-59844c95c7-v2zwv\" (UID: \"f480d3b1-963f-4deb-8324-01262b8e78ca\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893126 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893147 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893205 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " 
pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893238 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893259 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893282 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqhjs\" (UniqueName: \"kubernetes.io/projected/83ae1f03-9b88-4d51-b8dc-d398f56f630f-kube-api-access-vqhjs\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893301 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e386a754-06bb-476a-9e52-5fd38c93edd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893322 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-node-bootstrap-token\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893344 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1baf797b-7563-48fe-8f0f-e44b349edb96-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssc7k\" (UniqueName: \"kubernetes.io/projected/5cc43413-bfc7-4cc2-984a-b6843e6cf829-kube-api-access-ssc7k\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893384 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-config-volume\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " 
pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893409 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53268028-cc7b-4417-bada-ac723f7e2527-serving-cert\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893430 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-config\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893454 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/55f7075b-3a94-416c-830d-dab8afe9e6e5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893490 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqqq7\" (UniqueName: \"kubernetes.io/projected/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-kube-api-access-bqqq7\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893526 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-serving-cert\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893547 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893569 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb49v\" (UniqueName: \"kubernetes.io/projected/1baf797b-7563-48fe-8f0f-e44b349edb96-kube-api-access-fb49v\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893605 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle\") pod \"console-f9d7485db-gjjf2\" (UID: 
\"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893623 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-srv-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893645 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw2cn\" (UniqueName: \"kubernetes.io/projected/ffc5a842-e6af-43b9-b41e-f5334b5bac14-kube-api-access-gw2cn\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893690 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mq2sv\" (UniqueName: \"kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893713 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssvqb\" (UniqueName: \"kubernetes.io/projected/2ed68501-b6df-40a5-b58b-669bb8ff37d6-kube-api-access-ssvqb\") pod \"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893799 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-serving-cert\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893839 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893884 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9c82\" (UniqueName: \"kubernetes.io/projected/bdcba18d-9992-4e37-9937-16f727b35e14-kube-api-access-f9c82\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893961 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 
06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.893999 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mq97\" (UniqueName: \"kubernetes.io/projected/47e97c97-ef9c-4a82-9530-33b9a82b34a7-kube-api-access-8mq97\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894040 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xslsb\" (UniqueName: \"kubernetes.io/projected/c2ee355c-7c34-4626-aac3-99d9db842b7e-kube-api-access-xslsb\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894074 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kftfn\" (UniqueName: \"kubernetes.io/projected/55f7075b-3a94-416c-830d-dab8afe9e6e5-kube-api-access-kftfn\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894097 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/584f9925-88f6-432d-97f5-8748946f3d68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894149 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58ppg\" (UniqueName: \"kubernetes.io/projected/844e8b79-74e2-41c5-acdf-0fd560533b67-kube-api-access-58ppg\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894185 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-config\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894206 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83ae1f03-9b88-4d51-b8dc-d398f56f630f-serving-cert\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894226 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92rhq\" (UniqueName: \"kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894274 5055 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-registration-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894316 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-trusted-ca\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894343 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-profile-collector-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894367 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxcvk\" (UniqueName: \"kubernetes.io/projected/03155288-4e58-4224-bdd0-66ab4eddd34e-kube-api-access-qxcvk\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894390 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhrjt\" (UniqueName: \"kubernetes.io/projected/b3e614f0-8db8-477f-a9a0-88e8401a6590-kube-api-access-vhrjt\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894425 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e386a754-06bb-476a-9e52-5fd38c93edd4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894445 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2809714-7dec-4c09-a838-6537ff52d750-cert\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894468 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69f1192a-51c9-4939-9431-f39e329d25b8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894489 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-metrics-certs\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894526 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c2ee355c-7c34-4626-aac3-99d9db842b7e-metrics-tls\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894547 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-csi-data-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894570 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-service-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894591 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-config\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894623 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cx28\" (UniqueName: \"kubernetes.io/projected/7c962da4-5bac-4323-9064-0db6f79ac65d-kube-api-access-9cx28\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894646 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-default-certificate\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894667 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2ed68501-b6df-40a5-b58b-669bb8ff37d6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894701 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/584f9925-88f6-432d-97f5-8748946f3d68-config\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894723 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-images\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894756 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-socket-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894801 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-srv-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894824 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dnpd\" (UniqueName: \"kubernetes.io/projected/3a6c6b7a-2e81-450b-9ee6-03393169141f-kube-api-access-6dnpd\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894863 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-apiservice-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894901 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcmbh\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-kube-api-access-hcmbh\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894924 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-stats-auth\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894947 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.894981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bdcba18d-9992-4e37-9937-16f727b35e14-service-ca-bundle\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895005 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvzbq\" (UniqueName: \"kubernetes.io/projected/c31fae5c-60b1-4fd8-a23e-0e65de805c30-kube-api-access-bvzbq\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895047 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzt64\" (UniqueName: \"kubernetes.io/projected/53268028-cc7b-4417-bada-ac723f7e2527-kube-api-access-wzt64\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895081 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895102 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5cc43413-bfc7-4cc2-984a-b6843e6cf829-tmpfs\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895258 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/55f7075b-3a94-416c-830d-dab8afe9e6e5-available-featuregates\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.895872 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-service-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.896220 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-config\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: E1011 06:55:56.896615 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.396598837 +0000 UTC m=+141.170872714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.896882 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-config\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897019 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897057 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897247 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/47e97c97-ef9c-4a82-9530-33b9a82b34a7-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897283 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-mountpoint-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897315 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80947be8-e00a-4386-b49a-a42885cc132d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897345 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-service-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897393 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897422 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f1192a-51c9-4939-9431-f39e329d25b8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897449 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf552\" (UniqueName: \"kubernetes.io/projected/69f1192a-51c9-4939-9431-f39e329d25b8-kube-api-access-qf552\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897474 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1baf797b-7563-48fe-8f0f-e44b349edb96-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897498 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897522 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsjpf\" (UniqueName: \"kubernetes.io/projected/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-kube-api-access-jsjpf\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897548 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897555 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9grj\" (UniqueName: \"kubernetes.io/projected/51689c82-1eb8-4080-ab6c-44759c1c5b1a-kube-api-access-b9grj\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897809 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6adf2dd2-e065-4164-9097-49bbfbf6c587-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.897842 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c31fae5c-60b1-4fd8-a23e-0e65de805c30-proxy-tls\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898200 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898209 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-trusted-ca\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898233 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51689c82-1eb8-4080-ab6c-44759c1c5b1a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898262 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-webhook-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898286 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83ae1f03-9b88-4d51-b8dc-d398f56f630f-config\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 
06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898488 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c2ee355c-7c34-4626-aac3-99d9db842b7e-metrics-tls\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.898549 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53268028-cc7b-4417-bada-ac723f7e2527-serving-cert\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899105 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55f7075b-3a94-416c-830d-dab8afe9e6e5-serving-cert\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899822 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-serving-cert\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899859 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-service-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899888 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-images\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899976 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69f1192a-51c9-4939-9431-f39e329d25b8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.900474 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69f1192a-51c9-4939-9431-f39e329d25b8-serving-cert\") pod 
\"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.901009 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/53268028-cc7b-4417-bada-ac723f7e2527-config\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.901532 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.899640 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-ca\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.901445 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/80947be8-e00a-4386-b49a-a42885cc132d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.902480 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-config\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.902493 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.902862 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.902892 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.903404 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"images\" (UniqueName: \"kubernetes.io/configmap/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-images\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.902220 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-serving-cert\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.903725 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/80947be8-e00a-4386-b49a-a42885cc132d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.904166 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.904642 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.904751 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/584f9925-88f6-432d-97f5-8748946f3d68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.906221 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51689c82-1eb8-4080-ab6c-44759c1c5b1a-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.907068 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ffc5a842-e6af-43b9-b41e-f5334b5bac14-etcd-client\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.907520 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.911354 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.912978 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/80947be8-e00a-4386-b49a-a42885cc132d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.914252 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.922539 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.943379 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.963390 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Oct 11 06:55:56 crc kubenswrapper[5055]: I1011 06:55:56.983688 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.003167 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007231 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.007355 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.507325813 +0000 UTC m=+141.281599620 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007428 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-apiservice-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-stats-auth\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007484 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007525 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvzbq\" (UniqueName: \"kubernetes.io/projected/c31fae5c-60b1-4fd8-a23e-0e65de805c30-kube-api-access-bvzbq\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007542 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bdcba18d-9992-4e37-9937-16f727b35e14-service-ca-bundle\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007588 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5cc43413-bfc7-4cc2-984a-b6843e6cf829-tmpfs\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007610 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/47e97c97-ef9c-4a82-9530-33b9a82b34a7-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007627 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-mountpoint-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007645 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007686 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007702 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsjpf\" (UniqueName: \"kubernetes.io/projected/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-kube-api-access-jsjpf\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007722 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1baf797b-7563-48fe-8f0f-e44b349edb96-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007776 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6adf2dd2-e065-4164-9097-49bbfbf6c587-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007793 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-webhook-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007808 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c31fae5c-60b1-4fd8-a23e-0e65de805c30-proxy-tls\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007824 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83ae1f03-9b88-4d51-b8dc-d398f56f630f-config\") pod 
\"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007844 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007864 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007899 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007930 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/584f9925-88f6-432d-97f5-8748946f3d68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007952 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/511cd957-1ec3-4efb-9700-bd83fb8a2999-metrics-tls\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007973 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kft9\" (UniqueName: \"kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.007995 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3a6c6b7a-2e81-450b-9ee6-03393169141f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008024 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4p8m\" (UniqueName: \"kubernetes.io/projected/6adf2dd2-e065-4164-9097-49bbfbf6c587-kube-api-access-z4p8m\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:57 crc 
kubenswrapper[5055]: I1011 06:55:57.008048 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-certs\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008085 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e386a754-06bb-476a-9e52-5fd38c93edd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008116 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/511cd957-1ec3-4efb-9700-bd83fb8a2999-trusted-ca\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgvsj\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-kube-api-access-cgvsj\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008259 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-plugins-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008297 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008362 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-metrics-tls\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008388 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008444 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: 
\"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008479 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-cabundle\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008536 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6295\" (UniqueName: \"kubernetes.io/projected/c2809714-7dec-4c09-a838-6537ff52d750-kube-api-access-t6295\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008560 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sjkh\" (UniqueName: \"kubernetes.io/projected/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-kube-api-access-7sjkh\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008620 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6adf2dd2-e065-4164-9097-49bbfbf6c587-proxy-tls\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008647 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-key\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008704 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008730 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008792 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh8x4\" (UniqueName: \"kubernetes.io/projected/f480d3b1-963f-4deb-8324-01262b8e78ca-kube-api-access-qh8x4\") pod \"migrator-59844c95c7-v2zwv\" (UID: \"f480d3b1-963f-4deb-8324-01262b8e78ca\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008816 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008868 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008906 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bdcba18d-9992-4e37-9937-16f727b35e14-service-ca-bundle\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008937 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1baf797b-7563-48fe-8f0f-e44b349edb96-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.008965 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqhjs\" (UniqueName: \"kubernetes.io/projected/83ae1f03-9b88-4d51-b8dc-d398f56f630f-kube-api-access-vqhjs\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009019 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e386a754-06bb-476a-9e52-5fd38c93edd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009047 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-node-bootstrap-token\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009090 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-config-volume\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009122 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssc7k\" (UniqueName: 
\"kubernetes.io/projected/5cc43413-bfc7-4cc2-984a-b6843e6cf829-kube-api-access-ssc7k\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009146 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-config\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009205 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009230 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb49v\" (UniqueName: \"kubernetes.io/projected/1baf797b-7563-48fe-8f0f-e44b349edb96-kube-api-access-fb49v\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009253 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009273 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-srv-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009306 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mq2sv\" (UniqueName: \"kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009351 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/5cc43413-bfc7-4cc2-984a-b6843e6cf829-tmpfs\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009360 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssvqb\" (UniqueName: \"kubernetes.io/projected/2ed68501-b6df-40a5-b58b-669bb8ff37d6-kube-api-access-ssvqb\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009406 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9c82\" (UniqueName: \"kubernetes.io/projected/bdcba18d-9992-4e37-9937-16f727b35e14-kube-api-access-f9c82\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009427 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83ae1f03-9b88-4d51-b8dc-d398f56f630f-config\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009522 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-mountpoint-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009432 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009611 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6adf2dd2-e065-4164-9097-49bbfbf6c587-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009642 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/511cd957-1ec3-4efb-9700-bd83fb8a2999-trusted-ca\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009664 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mq97\" (UniqueName: \"kubernetes.io/projected/47e97c97-ef9c-4a82-9530-33b9a82b34a7-kube-api-access-8mq97\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009687 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-plugins-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009902 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/584f9925-88f6-432d-97f5-8748946f3d68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009943 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/83ae1f03-9b88-4d51-b8dc-d398f56f630f-serving-cert\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009968 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92rhq\" (UniqueName: \"kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.009991 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58ppg\" (UniqueName: \"kubernetes.io/projected/844e8b79-74e2-41c5-acdf-0fd560533b67-kube-api-access-58ppg\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010016 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-profile-collector-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010037 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxcvk\" (UniqueName: \"kubernetes.io/projected/03155288-4e58-4224-bdd0-66ab4eddd34e-kube-api-access-qxcvk\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010059 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhrjt\" (UniqueName: \"kubernetes.io/projected/b3e614f0-8db8-477f-a9a0-88e8401a6590-kube-api-access-vhrjt\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010081 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-registration-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010105 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e386a754-06bb-476a-9e52-5fd38c93edd4-kube-api-access\") 
pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010251 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-registration-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010280 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2809714-7dec-4c09-a838-6537ff52d750-cert\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010306 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-metrics-certs\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010331 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-csi-data-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010360 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cx28\" (UniqueName: \"kubernetes.io/projected/7c962da4-5bac-4323-9064-0db6f79ac65d-kube-api-access-9cx28\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010313 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-auth-proxy-config\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010388 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-default-certificate\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010414 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2ed68501-b6df-40a5-b58b-669bb8ff37d6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010442 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584f9925-88f6-432d-97f5-8748946f3d68-config\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-images\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010495 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-srv-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010524 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dnpd\" (UniqueName: \"kubernetes.io/projected/3a6c6b7a-2e81-450b-9ee6-03393169141f-kube-api-access-6dnpd\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010549 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-socket-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010637 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010659 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-socket-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010694 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-stats-auth\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010744 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-csi-data-dir\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " 
pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.010970 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-apiservice-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.011595 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3a6c6b7a-2e81-450b-9ee6-03393169141f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.011670 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/584f9925-88f6-432d-97f5-8748946f3d68-config\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.011885 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-cabundle\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.011947 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5cc43413-bfc7-4cc2-984a-b6843e6cf829-webhook-cert\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.012105 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.512063539 +0000 UTC m=+141.286337426 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.012993 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-config\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.013258 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-metrics-certs\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.013261 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e386a754-06bb-476a-9e52-5fd38c93edd4-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.013319 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/511cd957-1ec3-4efb-9700-bd83fb8a2999-metrics-tls\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.014010 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e386a754-06bb-476a-9e52-5fd38c93edd4-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.014016 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.014083 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bdcba18d-9992-4e37-9937-16f727b35e14-default-certificate\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.014909 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/83ae1f03-9b88-4d51-b8dc-d398f56f630f-serving-cert\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.014983 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7c962da4-5bac-4323-9064-0db6f79ac65d-signing-key\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.015830 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.016237 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/584f9925-88f6-432d-97f5-8748946f3d68-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.016916 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/2ed68501-b6df-40a5-b58b-669bb8ff37d6-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.016991 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6adf2dd2-e065-4164-9097-49bbfbf6c587-proxy-tls\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.023010 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.043504 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.051617 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/c31fae5c-60b1-4fd8-a23e-0e65de805c30-proxy-tls\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.063064 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.088338 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-profile-collector-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.090479 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-profile-collector-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.091084 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.091442 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.103914 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.111544 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.111677 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.611659276 +0000 UTC m=+141.385933083 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.111742 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c31fae5c-60b1-4fd8-a23e-0e65de805c30-images\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.112163 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.112498 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.612490725 +0000 UTC m=+141.386764532 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.122949 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.133449 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.142382 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.154730 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/844e8b79-74e2-41c5-acdf-0fd560533b67-srv-cert\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.163523 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Oct 11 06:55:57 crc 
kubenswrapper[5055]: I1011 06:55:57.174073 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/03155288-4e58-4224-bdd0-66ab4eddd34e-srv-cert\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.182617 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.203731 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.212566 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.212778 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.712743224 +0000 UTC m=+141.487017031 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.213140 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.213461 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.713453919 +0000 UTC m=+141.487727726 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.214349 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.222825 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.232510 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.244687 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.252608 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.262515 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.272402 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.288331 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.292534 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.302715 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.312757 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert\") pod \"console-f9d7485db-gjjf2\" (UID: 
\"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.314561 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.314735 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.814720374 +0000 UTC m=+141.588994181 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.315211 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.315549 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:57.815537902 +0000 UTC m=+141.589811709 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.323302 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.333451 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/47e97c97-ef9c-4a82-9530-33b9a82b34a7-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.343875 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.362876 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.369406 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1baf797b-7563-48fe-8f0f-e44b349edb96-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.382703 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.394810 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1baf797b-7563-48fe-8f0f-e44b349edb96-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.403256 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.416071 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.416694 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-11 06:55:57.916682363 +0000 UTC m=+141.690956170 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.423274 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.444345 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.463316 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.475605 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-node-bootstrap-token\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.482868 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.493216 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3e614f0-8db8-477f-a9a0-88e8401a6590-certs\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.502742 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.512055 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-config-volume\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.518219 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.518734 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.018719935 +0000 UTC m=+141.792993742 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.522834 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.543378 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.554533 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-metrics-tls\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.563737 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.582954 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.595457 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c2809714-7dec-4c09-a838-6537ff52d750-cert\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.603067 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.620028 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.620127 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.120111334 +0000 UTC m=+141.894385141 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.620436 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.620912 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.120897882 +0000 UTC m=+141.895171689 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.622954 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.643073 5055 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.663350 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.682693 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.718248 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmvbg\" (UniqueName: \"kubernetes.io/projected/c8de3d54-de40-4186-be10-f0a394a18830-kube-api-access-gmvbg\") pod \"downloads-7954f5f757-rdv7m\" (UID: \"c8de3d54-de40-4186-be10-f0a394a18830\") " pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.720970 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.721111 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.721246 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.221231944 +0000 UTC m=+141.995505751 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.721528 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.721940 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.221924528 +0000 UTC m=+141.996198335 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.736429 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6g7l\" (UniqueName: \"kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l\") pod \"controller-manager-879f6c89f-jsqdp\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.757694 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7x8v\" (UniqueName: \"kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v\") pod \"oauth-openshift-558db77b4-f5tqq\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.776873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl54b\" (UniqueName: \"kubernetes.io/projected/7a7df9e2-1bec-43f6-9459-71b36b81372f-kube-api-access-tl54b\") pod \"apiserver-76f77b778f-t28vk\" (UID: \"7a7df9e2-1bec-43f6-9459-71b36b81372f\") " pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.781528 5055 request.go:700] Waited for 1.898628942s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.797376 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfwpb\" (UniqueName: \"kubernetes.io/projected/18e6cd5e-9a1b-44a3-971c-e5f0784c05c2-kube-api-access-kfwpb\") pod \"machine-approver-56656f9798-2xzzt\" (UID: \"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.817519 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z55pq\" (UniqueName: \"kubernetes.io/projected/0013fda0-1d6c-42ea-991b-4560a7c988a0-kube-api-access-z55pq\") pod \"apiserver-7bbb656c7d-d7cr2\" (UID: \"0013fda0-1d6c-42ea-991b-4560a7c988a0\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.823491 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.824068 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-11 06:55:58.324008882 +0000 UTC m=+142.098282699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.826205 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.836279 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sncg\" (UniqueName: \"kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg\") pod \"route-controller-manager-6576b87f9c-vx6cb\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.857369 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbqfj\" (UniqueName: \"kubernetes.io/projected/ea2861ee-56c8-4a00-93ac-ba71b211ad7a-kube-api-access-cbqfj\") pod \"openshift-controller-manager-operator-756b6f6bc6-tt849\" (UID: \"ea2861ee-56c8-4a00-93ac-ba71b211ad7a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.875891 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.888378 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.902003 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.907755 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.921407 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqqq7\" (UniqueName: \"kubernetes.io/projected/499f5ce1-e32c-477b-9afc-b3c63ee6b55a-kube-api-access-bqqq7\") pod \"console-operator-58897d9998-4jldr\" (UID: \"499f5ce1-e32c-477b-9afc-b3c63ee6b55a\") " pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.922060 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.926456 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: E1011 06:55:57.926777 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.426749878 +0000 UTC m=+142.201023685 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.930386 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.945217 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kftfn\" (UniqueName: \"kubernetes.io/projected/55f7075b-3a94-416c-830d-dab8afe9e6e5-kube-api-access-kftfn\") pod \"openshift-config-operator-7777fb866f-gr9mj\" (UID: \"55f7075b-3a94-416c-830d-dab8afe9e6e5\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.962331 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.979802 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzt64\" (UniqueName: \"kubernetes.io/projected/53268028-cc7b-4417-bada-ac723f7e2527-kube-api-access-wzt64\") pod \"authentication-operator-69f744f599-vr25r\" (UID: \"53268028-cc7b-4417-bada-ac723f7e2527\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" Oct 11 06:55:57 crc kubenswrapper[5055]: I1011 06:55:57.999484 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45f55\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.027543 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.028067 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.528052404 +0000 UTC m=+142.302326211 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.028445 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xslsb\" (UniqueName: \"kubernetes.io/projected/c2ee355c-7c34-4626-aac3-99d9db842b7e-kube-api-access-xslsb\") pod \"dns-operator-744455d44c-d5vkx\" (UID: \"c2ee355c-7c34-4626-aac3-99d9db842b7e\") " pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.047086 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9grj\" (UniqueName: \"kubernetes.io/projected/51689c82-1eb8-4080-ab6c-44759c1c5b1a-kube-api-access-b9grj\") pod \"cluster-samples-operator-665b6dd947-rvswx\" (UID: \"51689c82-1eb8-4080-ab6c-44759c1c5b1a\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.064591 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rdv7m"] Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.067614 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcmbh\" (UniqueName: \"kubernetes.io/projected/80947be8-e00a-4386-b49a-a42885cc132d-kube-api-access-hcmbh\") pod \"cluster-image-registry-operator-dc59b4c8b-mnc7h\" (UID: \"80947be8-e00a-4386-b49a-a42885cc132d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.072662 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"] Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.085536 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw2cn\" (UniqueName: \"kubernetes.io/projected/ffc5a842-e6af-43b9-b41e-f5334b5bac14-kube-api-access-gw2cn\") pod \"etcd-operator-b45778765-56x9d\" (UID: \"ffc5a842-e6af-43b9-b41e-f5334b5bac14\") " pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.096216 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.103022 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf552\" (UniqueName: \"kubernetes.io/projected/69f1192a-51c9-4939-9431-f39e329d25b8-kube-api-access-qf552\") pod \"openshift-apiserver-operator-796bbdcf4f-d6kpg\" (UID: \"69f1192a-51c9-4939-9431-f39e329d25b8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.105954 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r"
Oct 11 06:55:58 crc kubenswrapper[5055]: W1011 06:55:58.115498 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18e6cd5e_9a1b_44a3_971c_e5f0784c05c2.slice/crio-4c65777deed9e8e8e42de71bae36c2cf20cb604d1d7b9b856ff0ace688ebe0e9 WatchSource:0}: Error finding container 4c65777deed9e8e8e42de71bae36c2cf20cb604d1d7b9b856ff0ace688ebe0e9: Status 404 returned error can't find the container with id 4c65777deed9e8e8e42de71bae36c2cf20cb604d1d7b9b856ff0ace688ebe0e9
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.122602 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n942m\" (UniqueName: \"kubernetes.io/projected/199dbd10-cbbc-4f91-bc11-6a7ea9dc6609-kube-api-access-n942m\") pod \"machine-api-operator-5694c8668f-bdszt\" (UID: \"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.123324 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.129047 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.129431 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.629412472 +0000 UTC m=+142.403686279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.141523 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-bound-sa-token\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.164873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvzbq\" (UniqueName: \"kubernetes.io/projected/c31fae5c-60b1-4fd8-a23e-0e65de805c30-kube-api-access-bvzbq\") pod \"machine-config-operator-74547568cd-4dmhf\" (UID: \"c31fae5c-60b1-4fd8-a23e-0e65de805c30\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.174741 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.179876 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsjpf\" (UniqueName: \"kubernetes.io/projected/b62bbdc8-f6bd-4772-9326-8eae843a6f9f-kube-api-access-jsjpf\") pod \"csi-hostpathplugin-ft9nj\" (UID: \"b62bbdc8-f6bd-4772-9326-8eae843a6f9f\") " pod="hostpath-provisioner/csi-hostpathplugin-ft9nj"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.189052 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-4jldr"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.191915 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.206122 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssvqb\" (UniqueName: \"kubernetes.io/projected/2ed68501-b6df-40a5-b58b-669bb8ff37d6-kube-api-access-ssvqb\") pod \"control-plane-machine-set-operator-78cbb6b69f-5x9qp\" (UID: \"2ed68501-b6df-40a5-b58b-669bb8ff37d6\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.211992 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.217931 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgvsj\" (UniqueName: \"kubernetes.io/projected/511cd957-1ec3-4efb-9700-bd83fb8a2999-kube-api-access-cgvsj\") pod \"ingress-operator-5b745b69d9-j9mng\" (UID: \"511cd957-1ec3-4efb-9700-bd83fb8a2999\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.229529 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.230024 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.729997253 +0000 UTC m=+142.504271060 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.239082 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.240003 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9c82\" (UniqueName: \"kubernetes.io/projected/bdcba18d-9992-4e37-9937-16f727b35e14-kube-api-access-f9c82\") pod \"router-default-5444994796-bxbhq\" (UID: \"bdcba18d-9992-4e37-9937-16f727b35e14\") " pod="openshift-ingress/router-default-5444994796-bxbhq"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.257010 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.259556 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4p8m\" (UniqueName: \"kubernetes.io/projected/6adf2dd2-e065-4164-9097-49bbfbf6c587-kube-api-access-z4p8m\") pod \"machine-config-controller-84d6567774-8hdjx\" (UID: \"6adf2dd2-e065-4164-9097-49bbfbf6c587\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.263051 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.272611 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.278153 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kft9\" (UniqueName: \"kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9\") pod \"marketplace-operator-79b997595-fflht\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " pod="openshift-marketplace/marketplace-operator-79b997595-fflht"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.278623 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.303157 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e386a754-06bb-476a-9e52-5fd38c93edd4-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-lrs96\" (UID: \"e386a754-06bb-476a-9e52-5fd38c93edd4\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.325574 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb49v\" (UniqueName: \"kubernetes.io/projected/1baf797b-7563-48fe-8f0f-e44b349edb96-kube-api-access-fb49v\") pod \"kube-storage-version-migrator-operator-b67b599dd-rxccz\" (UID: \"1baf797b-7563-48fe-8f0f-e44b349edb96\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.329124 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.329372 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.334553 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.334994 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.834976878 +0000 UTC m=+142.609250765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.341163 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.344987 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.348381 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssc7k\" (UniqueName: \"kubernetes.io/projected/5cc43413-bfc7-4cc2-984a-b6843e6cf829-kube-api-access-ssc7k\") pod \"packageserver-d55dfcdfc-qmgrs\" (UID: \"5cc43413-bfc7-4cc2-984a-b6843e6cf829\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.355605 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-bxbhq"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.363066 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.363338 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dnpd\" (UniqueName: \"kubernetes.io/projected/3a6c6b7a-2e81-450b-9ee6-03393169141f-kube-api-access-6dnpd\") pod \"multus-admission-controller-857f4d67dd-85c46\" (UID: \"3a6c6b7a-2e81-450b-9ee6-03393169141f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.376052 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.381283 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cx28\" (UniqueName: \"kubernetes.io/projected/7c962da4-5bac-4323-9064-0db6f79ac65d-kube-api-access-9cx28\") pod \"service-ca-9c57cc56f-bx7lp\" (UID: \"7c962da4-5bac-4323-9064-0db6f79ac65d\") " pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.384993 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.405534 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fflht"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.409867 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vr25r"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.422305 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8tmht\" (UID: \"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.422650 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.425729 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mq2sv\" (UniqueName: \"kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv\") pod \"collect-profiles-29336085-mbdqg\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.427116 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.436337 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.436867 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:58.936846804 +0000 UTC m=+142.711120621 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.441227 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-t28vk"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.443846 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.451149 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.451554 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sjkh\" (UniqueName: \"kubernetes.io/projected/9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1-kube-api-access-7sjkh\") pod \"dns-default-rdsf5\" (UID: \"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1\") " pod="openshift-dns/dns-default-rdsf5"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.471672 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.471949 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mq97\" (UniqueName: \"kubernetes.io/projected/47e97c97-ef9c-4a82-9530-33b9a82b34a7-kube-api-access-8mq97\") pod \"package-server-manager-789f6589d5-hk8wd\" (UID: \"47e97c97-ef9c-4a82-9530-33b9a82b34a7\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.477998 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6295\" (UniqueName: \"kubernetes.io/projected/c2809714-7dec-4c09-a838-6537ff52d750-kube-api-access-t6295\") pod \"ingress-canary-b9tgz\" (UID: \"c2809714-7dec-4c09-a838-6537ff52d750\") " pod="openshift-ingress-canary/ingress-canary-b9tgz"
Oct 11 06:55:58 crc kubenswrapper[5055]: W1011 06:55:58.478916 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0013fda0_1d6c_42ea_991b_4560a7c988a0.slice/crio-d5392db6e8b3a33a731847cf9e1032dd0742dbe9b334b0981d5b220ab6d6bd3e WatchSource:0}: Error finding container d5392db6e8b3a33a731847cf9e1032dd0742dbe9b334b0981d5b220ab6d6bd3e: Status 404 returned error can't find the container with id d5392db6e8b3a33a731847cf9e1032dd0742dbe9b334b0981d5b220ab6d6bd3e
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.497985 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.500210 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh8x4\" (UniqueName: \"kubernetes.io/projected/f480d3b1-963f-4deb-8324-01262b8e78ca-kube-api-access-qh8x4\") pod \"migrator-59844c95c7-v2zwv\" (UID: \"f480d3b1-963f-4deb-8324-01262b8e78ca\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.506301 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.519524 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92rhq\" (UniqueName: \"kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq\") pod \"console-f9d7485db-gjjf2\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " pod="openshift-console/console-f9d7485db-gjjf2"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.537897 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.538203 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.038190521 +0000 UTC m=+142.812464328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.539179 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-rdsf5"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.544327 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqhjs\" (UniqueName: \"kubernetes.io/projected/83ae1f03-9b88-4d51-b8dc-d398f56f630f-kube-api-access-vqhjs\") pod \"service-ca-operator-777779d784-ztvqr\" (UID: \"83ae1f03-9b88-4d51-b8dc-d398f56f630f\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.547273 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-b9tgz"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.564109 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58ppg\" (UniqueName: \"kubernetes.io/projected/844e8b79-74e2-41c5-acdf-0fd560533b67-kube-api-access-58ppg\") pod \"olm-operator-6b444d44fb-gs8q9\" (UID: \"844e8b79-74e2-41c5-acdf-0fd560533b67\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.585319 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/584f9925-88f6-432d-97f5-8748946f3d68-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-hx7bz\" (UID: \"584f9925-88f6-432d-97f5-8748946f3d68\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.613819 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxcvk\" (UniqueName: \"kubernetes.io/projected/03155288-4e58-4224-bdd0-66ab4eddd34e-kube-api-access-qxcvk\") pod \"catalog-operator-68c6474976-xgrhb\" (UID: \"03155288-4e58-4224-bdd0-66ab4eddd34e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.630409 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhrjt\" (UniqueName: \"kubernetes.io/projected/b3e614f0-8db8-477f-a9a0-88e8401a6590-kube-api-access-vhrjt\") pod \"machine-config-server-v6lr4\" (UID: \"b3e614f0-8db8-477f-a9a0-88e8401a6590\") " pod="openshift-machine-config-operator/machine-config-server-v6lr4"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.638497 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.638624 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.138592816 +0000 UTC m=+142.912866633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.638720 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" event={"ID":"ea2861ee-56c8-4a00-93ac-ba71b211ad7a","Type":"ContainerStarted","Data":"d0db841f8cd57e3bd914eb619c3530c0a584fbe48d960a1199cb95ba4ff75a96"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.638735 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.638996 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.138986409 +0000 UTC m=+142.913260216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.639551 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" event={"ID":"0f5d0c4c-155d-4fdc-851c-779e57302a8c","Type":"ContainerStarted","Data":"5f6deffc7d43bd545c53fffa328bae0a3686e11e81f66b0e722694a569cfd2ae"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.640075 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" event={"ID":"0013fda0-1d6c-42ea-991b-4560a7c988a0","Type":"ContainerStarted","Data":"d5392db6e8b3a33a731847cf9e1032dd0742dbe9b334b0981d5b220ab6d6bd3e"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.647716 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.652952 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" event={"ID":"7a7df9e2-1bec-43f6-9459-71b36b81372f","Type":"ContainerStarted","Data":"4f23126200517a32e7eedf20a9d1a894f4626dd813e4947b6d09d99161741a02"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.658178 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bxbhq" event={"ID":"bdcba18d-9992-4e37-9937-16f727b35e14","Type":"ContainerStarted","Data":"51ca6dc1f7d63d1a3622eef6b3ce6d1ebcf3005e947d5dd8682067cf62fca8d2"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.658970 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" event={"ID":"53268028-cc7b-4417-bada-ac723f7e2527","Type":"ContainerStarted","Data":"6810c99909d877ac6133b61f040d2531e4a4472e489a66aa510732479a264353"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.684345 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdv7m" event={"ID":"c8de3d54-de40-4186-be10-f0a394a18830","Type":"ContainerStarted","Data":"66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.684396 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdv7m" event={"ID":"c8de3d54-de40-4186-be10-f0a394a18830","Type":"ContainerStarted","Data":"cb42750f9f6dec1e0cc298c42a65d14fe4a26240b2e5e6164632424fb4e730a2"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.685006 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-rdv7m"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.686298 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.686340 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.687350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" event={"ID":"c7f95b49-339c-401f-975f-e356a2077b01","Type":"ContainerStarted","Data":"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.687370 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" event={"ID":"c7f95b49-339c-401f-975f-e356a2077b01","Type":"ContainerStarted","Data":"b476c5273f0eb8d5e408d102517301b7fbc7c272ebd0b2121f717ff54d86cb52"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.688161 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.690410 5055 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-jsqdp container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.690446 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.692069 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.695416 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.697187 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" event={"ID":"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2","Type":"ContainerStarted","Data":"4c65777deed9e8e8e42de71bae36c2cf20cb604d1d7b9b856ff0ace688ebe0e9"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.707661 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" event={"ID":"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497","Type":"ContainerStarted","Data":"f29d8197ba8e988692f296cd630ddfd8c95679ea7df62045f8032d403053a84e"}
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.720671 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.734147 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-4jldr"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.739248 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.739405 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.239381944 +0000 UTC m=+143.013655751 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.739513 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.740501 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.240492053 +0000 UTC m=+143.014765850 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.759247 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.781080 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.791073 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.815273 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gjjf2"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.829834 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-v6lr4"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.840381 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.840663 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.340642418 +0000 UTC m=+143.114916225 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.879899 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz"
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.891005 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-56x9d"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.912627 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d5vkx"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.926580 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.930716 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h"]
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.943025 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:58 crc kubenswrapper[5055]: E1011 06:55:58.943507 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.443495079 +0000 UTC m=+143.217768886 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:58 crc kubenswrapper[5055]: I1011 06:55:58.945038 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bdszt"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.044336 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.044627 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.544613228 +0000 UTC m=+143.318887035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.064377 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2ee355c_7c34_4626_aac3_99d9db842b7e.slice/crio-091f1161cca6782cc9f925671f3bb220e5033c3ad66c7c7ff3a20e695ce952e4 WatchSource:0}: Error finding container 091f1161cca6782cc9f925671f3bb220e5033c3ad66c7c7ff3a20e695ce952e4: Status 404 returned error can't find the container with id 091f1161cca6782cc9f925671f3bb220e5033c3ad66c7c7ff3a20e695ce952e4
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.146075 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.154144 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.155018 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.151896 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.152102 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.652091861 +0000 UTC m=+143.426365668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.264574 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.269655 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.769631905 +0000 UTC m=+143.543905712 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.335192 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.368145 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-ft9nj"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.369269 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.369815 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.869802872 +0000 UTC m=+143.644076679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.393116 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-85c46"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.397019 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.398719 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.429286 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.473691 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.474069 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.974038621 +0000 UTC m=+143.748312438 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.474159 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.474665 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:55:59.974653042 +0000 UTC m=+143.748926849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.555023 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rdsf5"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.578639 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.579214 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.079197482 +0000 UTC m=+143.853471289 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.592489 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.594895 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz"]
Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.604912 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b7663d8_9252_4e52_aeb9_f5a5c5f6ffc1.slice/crio-8378f8d318befebbd0a744f6fa154c41432338ae32ee3ce2ea02ab2b6b741952 WatchSource:0}: Error finding container 8378f8d318befebbd0a744f6fa154c41432338ae32ee3ce2ea02ab2b6b741952: Status 404 returned error can't find the container with id 8378f8d318befebbd0a744f6fa154c41432338ae32ee3ce2ea02ab2b6b741952
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.680213 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.680563 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.18055149 +0000 UTC m=+143.954825297 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.692587 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1baf797b_7563_48fe_8f0f_e44b349edb96.slice/crio-2fd808fec61936f123a522724bda959cea8a157f950d25a18cddfd58242363cb WatchSource:0}: Error finding container 2fd808fec61936f123a522724bda959cea8a157f950d25a18cddfd58242363cb: Status 404 returned error can't find the container with id 2fd808fec61936f123a522724bda959cea8a157f950d25a18cddfd58242363cb
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.721746 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" event={"ID":"51689c82-1eb8-4080-ab6c-44759c1c5b1a","Type":"ContainerStarted","Data":"4bd751099a019a9615b46826f29416b81bae84b0afcc9116e0fdd75682522736"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.723804 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" event={"ID":"55f7075b-3a94-416c-830d-dab8afe9e6e5","Type":"ContainerStarted","Data":"bc6bd9b4f32e099e6be3c4d18764d3059b6731186d453b1d52f058e5e38a3b32"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.723832 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" event={"ID":"55f7075b-3a94-416c-830d-dab8afe9e6e5","Type":"ContainerStarted","Data":"60286ecb4a2d1d864efa4befb67bde1d1e315154b6c914a4e3b33ee1cd8a9d21"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.725031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" event={"ID":"b62bbdc8-f6bd-4772-9326-8eae843a6f9f","Type":"ContainerStarted","Data":"981aa13c31189161a080df176b4395353aa29a7fabcdcc07f6b1c742a29f0107"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.726498 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4jldr" event={"ID":"499f5ce1-e32c-477b-9afc-b3c63ee6b55a","Type":"ContainerStarted","Data":"9532d9256249e5b26b0518365d73835604a730af97e7532c6c6fb1908cd55e08"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.727709 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" event={"ID":"6adf2dd2-e065-4164-9097-49bbfbf6c587","Type":"ContainerStarted","Data":"c7dc8db694d5431850d6d37f8bc806ad2d648b83d9cb21498e5854ee9f3cc171"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.729350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" event={"ID":"69f1192a-51c9-4939-9431-f39e329d25b8","Type":"ContainerStarted","Data":"0f108187ae2fa42ed9ccf05d93c688711d5868cdf80e706fac814a5241d4fbf5"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.732723 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" event={"ID":"1baf797b-7563-48fe-8f0f-e44b349edb96","Type":"ContainerStarted","Data":"2fd808fec61936f123a522724bda959cea8a157f950d25a18cddfd58242363cb"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.737573 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rdsf5" event={"ID":"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1","Type":"ContainerStarted","Data":"8378f8d318befebbd0a744f6fa154c41432338ae32ee3ce2ea02ab2b6b741952"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.738647 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" event={"ID":"e386a754-06bb-476a-9e52-5fd38c93edd4","Type":"ContainerStarted","Data":"56c7545e2cafb5f45c547ae3dd98a5f945242313167ed092c07c3913735aaf5a"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.741836 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" event={"ID":"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609","Type":"ContainerStarted","Data":"83c9513c79d8ff7b3ce907d69b113df6091ae689e52ca5eabe31f466a22cf636"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.746356 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" event={"ID":"5cc43413-bfc7-4cc2-984a-b6843e6cf829","Type":"ContainerStarted","Data":"6efa4bce777160e779bfd8fcf8512f9ebfd31cec4cfafbd2968066400424d064"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.751021 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-bx7lp"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.752838 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" event={"ID":"ea2861ee-56c8-4a00-93ac-ba71b211ad7a","Type":"ContainerStarted","Data":"001d325c8609e88bfe341559a825a2555e60af6ff9f0575dbbe2842e5a84c47e"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.756197 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.762675 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" event={"ID":"53268028-cc7b-4417-bada-ac723f7e2527","Type":"ContainerStarted","Data":"b5d6cf6a7e92614ea8434ff1cb3e82689e6bd1b1ee25ce6fef954d6b41fecbdf"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.763577 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" event={"ID":"2ed68501-b6df-40a5-b58b-669bb8ff37d6","Type":"ContainerStarted","Data":"9540dcf61c280cf3437dd43bac46582171842df50077060d9b894c0998e5d6ad"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.767705 5055 generic.go:334] "Generic (PLEG): container finished" podID="0013fda0-1d6c-42ea-991b-4560a7c988a0" containerID="abe311ed8f7077ffcce67496b2f7265a6e0140761e7d5247dd172206a9cc8f53" exitCode=0
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.767748 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" event={"ID":"0013fda0-1d6c-42ea-991b-4560a7c988a0","Type":"ContainerDied","Data":"abe311ed8f7077ffcce67496b2f7265a6e0140761e7d5247dd172206a9cc8f53"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.771940 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.773135 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.773289 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" event={"ID":"ffc5a842-e6af-43b9-b41e-f5334b5bac14","Type":"ContainerStarted","Data":"10b2f984965475d5f06064ed405324302ea0c2ce437535f80b55f0239a523b79"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.774631 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" event={"ID":"c31fae5c-60b1-4fd8-a23e-0e65de805c30","Type":"ContainerStarted","Data":"5f8cd39948639a13ff795aa94bfefa811325cd4f8dc268c813d255e3a6aaf400"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.774660 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" event={"ID":"c31fae5c-60b1-4fd8-a23e-0e65de805c30","Type":"ContainerStarted","Data":"081352b0e9f599b54a46fed6af3b0d767c85b46fbe9ab732ed6aa1b68f61639b"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.778729 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" event={"ID":"be40637a-a178-4125-958b-c1bc36dee57d","Type":"ContainerStarted","Data":"bcb2434dc072ba2114ac568561996993dce1c7a597b3266f2ccac5c4df2b49e9"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.784248 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" event={"ID":"25f102e3-a97b-4401-a760-98f34d6fe038","Type":"ContainerStarted","Data":"ad9b912a8ca25439831c44580a37b7557f5a87a37d70439f3ff5ddbc32cd1fd0"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.785160 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.785776 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.285733282 +0000 UTC m=+144.060007089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.788152 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" event={"ID":"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497","Type":"ContainerStarted","Data":"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.788906 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq"
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.790618 5055 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-f5tqq container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body=
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.790674 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused"
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.791775 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" event={"ID":"511cd957-1ec3-4efb-9700-bd83fb8a2999","Type":"ContainerStarted","Data":"1f2bd57246fb75269d32b9e413f412de6b9c3582f64beb0c5c5151aabf9642ac"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.793133 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" event={"ID":"80947be8-e00a-4386-b49a-a42885cc132d","Type":"ContainerStarted","Data":"d738e9a041bc4a59254121421390f6b4fe6b7d81d7d24537d8b16dd86238f9a2"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.795336 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" event={"ID":"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2","Type":"ContainerStarted","Data":"814699515d7cf75b1d4a4e5a7986c9a9f81dfe71070074064db6e3edea9bd822"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.821540 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bxbhq" event={"ID":"bdcba18d-9992-4e37-9937-16f727b35e14","Type":"ContainerStarted","Data":"12a3d5c2447f89d21a3f5806786daa264e6f82bbe9b693c8751be38c85584274"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.834505 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v6lr4" event={"ID":"b3e614f0-8db8-477f-a9a0-88e8401a6590","Type":"ContainerStarted","Data":"c1f676dfb99c0fffca004aba46b0e2c93a6ef8c1167efae929a61c9a1f35300e"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.840627 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" event={"ID":"c2ee355c-7c34-4626-aac3-99d9db842b7e","Type":"ContainerStarted","Data":"091f1161cca6782cc9f925671f3bb220e5033c3ad66c7c7ff3a20e695ce952e4"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.843480 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" event={"ID":"3a6c6b7a-2e81-450b-9ee6-03393169141f","Type":"ContainerStarted","Data":"53f0b75a14ee0f961cec0852d642d9808ba39dae3ea074a51ad6e7355e1a453c"}
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.844473 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.844509 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.854585 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c962da4_5bac_4323_9064_0db6f79ac65d.slice/crio-80b6e76352eb52d990a5b1074db42fbef9d5d24b6e395708f07b8d201dd341cb WatchSource:0}: Error finding container 80b6e76352eb52d990a5b1074db42fbef9d5d24b6e395708f07b8d201dd341cb: Status 404 returned error can't find the container with id 80b6e76352eb52d990a5b1074db42fbef9d5d24b6e395708f07b8d201dd341cb
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.854701 5055 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-jsqdp container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.854746 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.866480 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-b9tgz"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.867959 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.870998 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.875722 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr"]
Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.886339 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName:
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.887559 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.387542546 +0000 UTC m=+144.161816443 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.922405 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2809714_7dec_4c09_a838_6537ff52d750.slice/crio-210beafc0e6af714471cd09e737be984c87a4dd95ae6c8b0bbf3bdb4b377f81e WatchSource:0}: Error finding container 210beafc0e6af714471cd09e737be984c87a4dd95ae6c8b0bbf3bdb4b377f81e: Status 404 returned error can't find the container with id 210beafc0e6af714471cd09e737be984c87a4dd95ae6c8b0bbf3bdb4b377f81e Oct 11 06:55:59 crc kubenswrapper[5055]: W1011 06:55:59.926500 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod584f9925_88f6_432d_97f5_8748946f3d68.slice/crio-1b73775a4ff35b3edd2a2d0580ab9aff22c90f996aa450344aec521c12e280be WatchSource:0}: Error finding container 1b73775a4ff35b3edd2a2d0580ab9aff22c90f996aa450344aec521c12e280be: Status 404 returned error can't find the container with id 1b73775a4ff35b3edd2a2d0580ab9aff22c90f996aa450344aec521c12e280be Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.937843 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9"] Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.940373 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.975813 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" podStartSLOduration=123.975795015 podStartE2EDuration="2m3.975795015s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:55:59.974148317 +0000 UTC m=+143.748422124" watchObservedRunningTime="2025-10-11 06:55:59.975795015 +0000 UTC m=+143.750068822" Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.988057 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 
06:55:59.988245 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.48822073 +0000 UTC m=+144.262494537 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:55:59 crc kubenswrapper[5055]: I1011 06:55:59.988413 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:55:59 crc kubenswrapper[5055]: E1011 06:55:59.989158 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.489146192 +0000 UTC m=+144.263419999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: W1011 06:56:00.039355 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03155288_4e58_4224_bdd0_66ab4eddd34e.slice/crio-6cbd37e6902fefcd0942c43db25fdcbe27d2c7681f66cd69a89939eb3bf84d20 WatchSource:0}: Error finding container 6cbd37e6902fefcd0942c43db25fdcbe27d2c7681f66cd69a89939eb3bf84d20: Status 404 returned error can't find the container with id 6cbd37e6902fefcd0942c43db25fdcbe27d2c7681f66cd69a89939eb3bf84d20 Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.093269 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.093597 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.593574438 +0000 UTC m=+144.367848245 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.103919 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-rdv7m" podStartSLOduration=124.103900749 podStartE2EDuration="2m4.103900749s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.102181279 +0000 UTC m=+143.876455076" watchObservedRunningTime="2025-10-11 06:56:00.103900749 +0000 UTC m=+143.878174556" Oct 11 06:56:00 crc kubenswrapper[5055]: W1011 06:56:00.116117 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod844e8b79_74e2_41c5_acdf_0fd560533b67.slice/crio-d759e5e60df0d1ebb162c0cd75ffd3f24ad066e6ff0d332cb023645d3dc848e5 WatchSource:0}: Error finding container d759e5e60df0d1ebb162c0cd75ffd3f24ad066e6ff0d332cb023645d3dc848e5: Status 404 returned error can't find the container with id d759e5e60df0d1ebb162c0cd75ffd3f24ad066e6ff0d332cb023645d3dc848e5 Oct 11 06:56:00 crc kubenswrapper[5055]: W1011 06:56:00.118083 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbad9e64_0bb0_4acc_9bff_63234abcf93c.slice/crio-8adaf00efe061fcb53c6a2a8e97b18c2335f07215f469df43dc42bcd943a0d3c WatchSource:0}: Error finding container 8adaf00efe061fcb53c6a2a8e97b18c2335f07215f469df43dc42bcd943a0d3c: Status 404 returned error can't find the container with id 8adaf00efe061fcb53c6a2a8e97b18c2335f07215f469df43dc42bcd943a0d3c Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.195329 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.195720 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.695704883 +0000 UTC m=+144.469978690 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.304075 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.305497 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.805468805 +0000 UTC m=+144.579742612 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.356936 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.359503 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.359580 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.405562 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.405870 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:00.905857389 +0000 UTC m=+144.680131196 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.507000 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.507466 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.007446095 +0000 UTC m=+144.781719902 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.559029 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-vr25r" podStartSLOduration=124.558978799 podStartE2EDuration="2m4.558978799s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.553224708 +0000 UTC m=+144.327498515" watchObservedRunningTime="2025-10-11 06:56:00.558978799 +0000 UTC m=+144.333252606" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.613745 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.614081 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.114070008 +0000 UTC m=+144.888343805 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.649272 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-tt849" podStartSLOduration=124.649244039 podStartE2EDuration="2m4.649244039s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.645336722 +0000 UTC m=+144.419610559" watchObservedRunningTime="2025-10-11 06:56:00.649244039 +0000 UTC m=+144.423517846" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.678009 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-bxbhq" podStartSLOduration=124.677989655 podStartE2EDuration="2m4.677989655s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.675586221 +0000 UTC m=+144.449860028" watchObservedRunningTime="2025-10-11 06:56:00.677989655 +0000 UTC m=+144.452263462" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.714660 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.714848 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.214817675 +0000 UTC m=+144.989091482 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.715971 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.717650 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.217634563 +0000 UTC m=+144.991908370 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.758870 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" podStartSLOduration=124.758834705 podStartE2EDuration="2m4.758834705s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.755589162 +0000 UTC m=+144.529862959" watchObservedRunningTime="2025-10-11 06:56:00.758834705 +0000 UTC m=+144.533108512" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.817466 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.817901 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.317870042 +0000 UTC m=+145.092143849 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.851284 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" event={"ID":"ffc5a842-e6af-43b9-b41e-f5334b5bac14","Type":"ContainerStarted","Data":"1cacf1df9f39f50cc666b63c79adcc982b54410a58fc3bef762020bc894bfdf3"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.852626 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" event={"ID":"69f1192a-51c9-4939-9431-f39e329d25b8","Type":"ContainerStarted","Data":"21c33fa4105685867b69596c138aae9d2df35ea6eaca6ff8714e94b791e2304e"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.854487 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" event={"ID":"7c962da4-5bac-4323-9064-0db6f79ac65d","Type":"ContainerStarted","Data":"80b6e76352eb52d990a5b1074db42fbef9d5d24b6e395708f07b8d201dd341cb"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.856304 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-v6lr4" event={"ID":"b3e614f0-8db8-477f-a9a0-88e8401a6590","Type":"ContainerStarted","Data":"74508e7cf7390054330869842e994bc9a61d7124b6e654725e424c3077a82b40"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.864286 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" event={"ID":"6adf2dd2-e065-4164-9097-49bbfbf6c587","Type":"ContainerStarted","Data":"4db5fdc981ef547b5d5f8787c95e3290481881bb3ad0d8adb01467a462fa3194"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.866548 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" event={"ID":"47e97c97-ef9c-4a82-9530-33b9a82b34a7","Type":"ContainerStarted","Data":"f8a6f2b3acb469cc4b1835f61ea0c47e378aab61d8752494ec6f8f26b102e3ea"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.867477 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-4jldr" event={"ID":"499f5ce1-e32c-477b-9afc-b3c63ee6b55a","Type":"ContainerStarted","Data":"1dd089f247cb8e5f48a9b06bcb6776dfbed2879a54a75cc726e1d5e116c6cf6d"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.868416 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.869335 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gjjf2" event={"ID":"bbad9e64-0bb0-4acc-9bff-63234abcf93c","Type":"ContainerStarted","Data":"8adaf00efe061fcb53c6a2a8e97b18c2335f07215f469df43dc42bcd943a0d3c"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.870229 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" 
event={"ID":"51689c82-1eb8-4080-ab6c-44759c1c5b1a","Type":"ContainerStarted","Data":"fd8d104f12c5d5fec534555212d82728d46799337755fcff7ad3671259b9d55e"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.871052 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" event={"ID":"5cc43413-bfc7-4cc2-984a-b6843e6cf829","Type":"ContainerStarted","Data":"4da3a0190831934e852f7baca25288a20718fe64f8ea5fc2f71b26b1cf69f9d0"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.872120 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" event={"ID":"18e6cd5e-9a1b-44a3-971c-e5f0784c05c2","Type":"ContainerStarted","Data":"4e5c8111f1407ad7add795b4f2f029f8658d0bba4006138e90b73914e5e9ea3b"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.873121 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" event={"ID":"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c","Type":"ContainerStarted","Data":"c1208310875b73d89f184a83538b462af1b59e57b37b155f841522d0c83fef53"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.873820 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" event={"ID":"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609","Type":"ContainerStarted","Data":"d101b9666c925186e3e9fcd0b4cde02ec4994b831addf322ac92950f98c07a93"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.874654 5055 generic.go:334] "Generic (PLEG): container finished" podID="7a7df9e2-1bec-43f6-9459-71b36b81372f" containerID="b6e0997d207974d23deffc5e617dac6fb32d0970f4dbcda62c3f5b38e15c5471" exitCode=0 Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.874687 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" event={"ID":"7a7df9e2-1bec-43f6-9459-71b36b81372f","Type":"ContainerDied","Data":"b6e0997d207974d23deffc5e617dac6fb32d0970f4dbcda62c3f5b38e15c5471"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.875588 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" event={"ID":"844e8b79-74e2-41c5-acdf-0fd560533b67","Type":"ContainerStarted","Data":"d759e5e60df0d1ebb162c0cd75ffd3f24ad066e6ff0d332cb023645d3dc848e5"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.879317 5055 patch_prober.go:28] interesting pod/console-operator-58897d9998-4jldr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.879380 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4jldr" podUID="499f5ce1-e32c-477b-9afc-b3c63ee6b55a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.880471 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" event={"ID":"80947be8-e00a-4386-b49a-a42885cc132d","Type":"ContainerStarted","Data":"921dc7d74b8fb3736d54a183b52e4602588e2ab735a941eb013bbbbb3be8716f"} 
Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.887590 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-56x9d" podStartSLOduration=124.887565432 podStartE2EDuration="2m4.887565432s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.879912704 +0000 UTC m=+144.654186531" watchObservedRunningTime="2025-10-11 06:56:00.887565432 +0000 UTC m=+144.661839239" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.905882 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" event={"ID":"3a6c6b7a-2e81-450b-9ee6-03393169141f","Type":"ContainerStarted","Data":"02b4d62d8f9e3a5d070772e6d47b108cbd21b4fde4449332c975120f142167ce"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.907729 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-b9tgz" event={"ID":"c2809714-7dec-4c09-a838-6537ff52d750","Type":"ContainerStarted","Data":"210beafc0e6af714471cd09e737be984c87a4dd95ae6c8b0bbf3bdb4b377f81e"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.908460 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" event={"ID":"f480d3b1-963f-4deb-8324-01262b8e78ca","Type":"ContainerStarted","Data":"d1f8bf319d168df30ee52e60ff9f67de18a97dfbbdd99c5fb96f91430189187b"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.909277 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" event={"ID":"511cd957-1ec3-4efb-9700-bd83fb8a2999","Type":"ContainerStarted","Data":"2e90055165abf45517514eba1c2e0cb49e4f0edb25819ee0715719a4646a90ae"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.913981 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-4jldr" podStartSLOduration=124.913967476 podStartE2EDuration="2m4.913967476s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.899278032 +0000 UTC m=+144.673551839" watchObservedRunningTime="2025-10-11 06:56:00.913967476 +0000 UTC m=+144.688241283" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.917973 5055 generic.go:334] "Generic (PLEG): container finished" podID="55f7075b-3a94-416c-830d-dab8afe9e6e5" containerID="bc6bd9b4f32e099e6be3c4d18764d3059b6731186d453b1d52f058e5e38a3b32" exitCode=0 Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.918103 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" event={"ID":"55f7075b-3a94-416c-830d-dab8afe9e6e5","Type":"ContainerDied","Data":"bc6bd9b4f32e099e6be3c4d18764d3059b6731186d453b1d52f058e5e38a3b32"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.919675 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:00 crc kubenswrapper[5055]: E1011 06:56:00.920185 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.420172003 +0000 UTC m=+145.194445800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.930579 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" event={"ID":"c2ee355c-7c34-4626-aac3-99d9db842b7e","Type":"ContainerStarted","Data":"a99cc3ef4b346c281fa9d4ec61937130668483fe6090fc4bc5d2993348626e9f"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.936303 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" event={"ID":"0f5d0c4c-155d-4fdc-851c-779e57302a8c","Type":"ContainerStarted","Data":"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.936533 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.945780 5055 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx6cb container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.945835 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.958857 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" event={"ID":"83ae1f03-9b88-4d51-b8dc-d398f56f630f","Type":"ContainerStarted","Data":"5549ef84588aefaeeba6b1f13abb779e56b218146bf5e56cec85905395db1454"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.968825 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" event={"ID":"03155288-4e58-4224-bdd0-66ab4eddd34e","Type":"ContainerStarted","Data":"6cbd37e6902fefcd0942c43db25fdcbe27d2c7681f66cd69a89939eb3bf84d20"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.974382 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-2xzzt" podStartSLOduration=124.973682066 podStartE2EDuration="2m4.973682066s" 
podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:00.914546556 +0000 UTC m=+144.688820363" watchObservedRunningTime="2025-10-11 06:56:00.973682066 +0000 UTC m=+144.747955873" Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.976553 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" event={"ID":"2ed68501-b6df-40a5-b58b-669bb8ff37d6","Type":"ContainerStarted","Data":"ba43683471dec9b7d9c0cef488d0827f734bdb9642ec3feed6c4a7db61943bdc"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.992062 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" event={"ID":"584f9925-88f6-432d-97f5-8748946f3d68","Type":"ContainerStarted","Data":"1b73775a4ff35b3edd2a2d0580ab9aff22c90f996aa450344aec521c12e280be"} Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.992876 5055 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-f5tqq container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" start-of-body= Oct 11 06:56:00 crc kubenswrapper[5055]: I1011 06:56:00.992934 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.7:6443/healthz\": dial tcp 10.217.0.7:6443: connect: connection refused" Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.022880 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.025742 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.525705517 +0000 UTC m=+145.299979324 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.026229 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.028639 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.528620549 +0000 UTC m=+145.302894356 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.045668 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" podStartSLOduration=125.045647965 podStartE2EDuration="2m5.045647965s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:01.043004593 +0000 UTC m=+144.817278400" watchObservedRunningTime="2025-10-11 06:56:01.045647965 +0000 UTC m=+144.819921772" Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.109121 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mnc7h" podStartSLOduration=125.109092506 podStartE2EDuration="2m5.109092506s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:01.077140408 +0000 UTC m=+144.851414215" watchObservedRunningTime="2025-10-11 06:56:01.109092506 +0000 UTC m=+144.883366313" Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.111304 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-5x9qp" podStartSLOduration=125.111297424 podStartE2EDuration="2m5.111297424s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:01.108253667 +0000 UTC m=+144.882527474" watchObservedRunningTime="2025-10-11 06:56:01.111297424 +0000 UTC m=+144.885571251" Oct 11 
06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.127915 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.128164 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.628114282 +0000 UTC m=+145.402388099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.129164 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.131795 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.63175644 +0000 UTC m=+145.406030457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.230902 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.231121 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.731096177 +0000 UTC m=+145.505369984 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.231338 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.231665 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.731658117 +0000 UTC m=+145.505931924 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.332543 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.332684 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.832657312 +0000 UTC m=+145.606931119 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.332813 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.333134 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.833122739 +0000 UTC m=+145.607396546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.358383 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.358438 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.434085 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.434463 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.934435555 +0000 UTC m=+145.708709362 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.434661 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.435231 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:01.935211322 +0000 UTC m=+145.709485339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.536181 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.536541 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.036522619 +0000 UTC m=+145.810796426 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.638001 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.638328 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.138316472 +0000 UTC m=+145.912590279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.738718 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.738880 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.238853581 +0000 UTC m=+146.013127388 (durationBeforeRetry 500ms). 
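[annotation] The "m=+145.505931924"-style suffix on these retry deadlines is Go's monotonic clock reading: a time.Time carries both a wall-clock and a monotonic component, and String() prints the latter as a decimal number of seconds (in practice, seconds since the kubelet process started). A short demonstration:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Now()
        // Prints something like:
        //   2025-10-11 06:56:01.7 +0000 UTC m=+0.000012345
        // The m=+ field is the monotonic clock reading, which is what the
        // kubelet's retry deadlines in this log show.
        fmt.Println(t)

        // Round(0) strips the monotonic reading; only wall time remains.
        fmt.Println(t.Round(0))
    }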
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.739726 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.740221 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.240208999 +0000 UTC m=+146.014482806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.841054 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.841205 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.341184253 +0000 UTC m=+146.115458070 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.841485 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.841806 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.341797395 +0000 UTC m=+146.116071202 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.943608 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.943843 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.443808996 +0000 UTC m=+146.218082803 (durationBeforeRetry 500ms). 
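[annotation] The nestedpendingoperations.go:348 lines show the kubelet's per-volume operation serializer at work: after a failure it refuses to start the same operation again until a backoff deadline passes, hence "No retries permitted until ... (durationBeforeRetry 500ms)" roughly every half second. A simplified sketch of that gate, with hypothetical names (the real kubelet code also manages per-volume pending state and can grow the delay):

    package main

    import (
        "fmt"
        "time"
    )

    // backoffGate remembers when an operation last failed and how long to
    // wait before permitting another attempt. Simplified sketch.
    type backoffGate struct {
        lastError           time.Time
        durationBeforeRetry time.Duration
    }

    func (g *backoffGate) fail(now time.Time) {
        g.lastError = now
        if g.durationBeforeRetry == 0 {
            g.durationBeforeRetry = 500 * time.Millisecond // initial backoff
        }
    }

    func (g *backoffGate) retryPermitted(now time.Time) bool {
        return now.After(g.lastError.Add(g.durationBeforeRetry))
    }

    func main() {
        g := &backoffGate{}
        g.fail(time.Now())
        fmt.Println("immediately:", g.retryPermitted(time.Now())) // false
        time.Sleep(600 * time.Millisecond)
        fmt.Println("after 600ms:", g.retryPermitted(time.Now())) // true
    }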
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.944517 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:01 crc kubenswrapper[5055]: E1011 06:56:01.945087 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.44506897 +0000 UTC m=+146.219342777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:01 crc kubenswrapper[5055]: I1011 06:56:01.991831 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" event={"ID":"1baf797b-7563-48fe-8f0f-e44b349edb96","Type":"ContainerStarted","Data":"58a032256f598d1669d039498807460e1aa19eebc547362cea2971ca0d334034"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.000934 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rdsf5" event={"ID":"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1","Type":"ContainerStarted","Data":"d7c073ed4558e6ca8f822bfcbd780e00ed33267eafcb4c2f60336146c60879ac"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.002943 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" event={"ID":"25f102e3-a97b-4401-a760-98f34d6fe038","Type":"ContainerStarted","Data":"c8abc14887fe6b1e823f609b31de625ff1fe1c99704f169a579d6b80fa424e8a"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.006481 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" event={"ID":"584f9925-88f6-432d-97f5-8748946f3d68","Type":"ContainerStarted","Data":"66a6508d5081b429389c9b585ba7a8ba951d4c35011eaf573b6ee7b3524ac428"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.008848 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-rxccz" podStartSLOduration=126.008825702 podStartE2EDuration="2m6.008825702s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.006473619 +0000 UTC m=+145.780747426" watchObservedRunningTime="2025-10-11 06:56:02.008825702 +0000 UTC m=+145.783099509" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.010151 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gjjf2" event={"ID":"bbad9e64-0bb0-4acc-9bff-63234abcf93c","Type":"ContainerStarted","Data":"d1caef781fbca211e47cdf81c67e07666f0b9955b9a4f0e629b9ccb273244636"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.011694 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" event={"ID":"03155288-4e58-4224-bdd0-66ab4eddd34e","Type":"ContainerStarted","Data":"4e24e2fd5c2a79f848c7e96fbd5d2bca733e2986389889fafddd5ab68d8e938c"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.014249 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" event={"ID":"47e97c97-ef9c-4a82-9530-33b9a82b34a7","Type":"ContainerStarted","Data":"5fffacec6841e3f1bc2aac2938ba1e4af8e1aa6f16793d5ebca92d1b1f517b11"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.017449 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" event={"ID":"199dbd10-cbbc-4f91-bc11-6a7ea9dc6609","Type":"ContainerStarted","Data":"74239387f1cef31ebd390d8b7a18ec3dbb580a591e0bd7991b711440dca82b29"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.019403 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" event={"ID":"7c962da4-5bac-4323-9064-0db6f79ac65d","Type":"ContainerStarted","Data":"290719c8ee116c67f1e16ffc7c1f357ab5ea4602d1c861407cc4f69a7bd93c3a"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.022464 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" event={"ID":"844e8b79-74e2-41c5-acdf-0fd560533b67","Type":"ContainerStarted","Data":"6b3f26c8d170bd1487a15faebfde719158923db1916dbb3a6b4e725a9b41e599"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.028153 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-hx7bz" podStartSLOduration=126.028034594 podStartE2EDuration="2m6.028034594s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.026628715 +0000 UTC m=+145.800902522" watchObservedRunningTime="2025-10-11 06:56:02.028034594 +0000 UTC m=+145.802308421" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.034793 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" event={"ID":"c31fae5c-60b1-4fd8-a23e-0e65de805c30","Type":"ContainerStarted","Data":"28278e4b6d1d03906f59de81386897ac2828650fb235b89dc0fd220082935471"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.036481 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" event={"ID":"abe0fe8e-579b-4e1c-89c5-1b54ccd69d9c","Type":"ContainerStarted","Data":"f62e7fda0519e56a947e710ede04912481d90d908f816c2a112d174fa30c43fc"} Oct 11 06:56:02 crc 
kubenswrapper[5055]: I1011 06:56:02.037680 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" event={"ID":"be40637a-a178-4125-958b-c1bc36dee57d","Type":"ContainerStarted","Data":"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.038098 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.039378 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" event={"ID":"511cd957-1ec3-4efb-9700-bd83fb8a2999","Type":"ContainerStarted","Data":"7ce3a8aec6c723aba1ac7b4c6f61b90fdef02111f130bd1885f5051cff92d690"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.041861 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" event={"ID":"51689c82-1eb8-4080-ab6c-44759c1c5b1a","Type":"ContainerStarted","Data":"dfb6a9d37399d39bfa0d7e1ba893b09e04dd1eb30fc8a27cc2c32131dc31f37e"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.043526 5055 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fflht container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/healthz\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.043583 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.19:8080/healthz\": dial tcp 10.217.0.19:8080: connect: connection refused" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.043966 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" event={"ID":"e386a754-06bb-476a-9e52-5fd38c93edd4","Type":"ContainerStarted","Data":"ca0d9943e751bc4db8b00353b63d6dfc51b5c3a7aa051e96b824b3dd4b394a30"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.046040 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" event={"ID":"f480d3b1-963f-4deb-8324-01262b8e78ca","Type":"ContainerStarted","Data":"f4d9aca88f6c6f48855ec1fd7a543966ee2c3e1bd497da764a0d60e3770e9776"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.046524 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.046677 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.546659266 +0000 UTC m=+146.320933073 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.046849 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.047315 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.547290548 +0000 UTC m=+146.321564355 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.048944 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" event={"ID":"0013fda0-1d6c-42ea-991b-4560a7c988a0","Type":"ContainerStarted","Data":"6403e19fc8221bea439d477bdce6f86a12b8ae4afa4888641989103e59799daa"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.051639 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" event={"ID":"83ae1f03-9b88-4d51-b8dc-d398f56f630f","Type":"ContainerStarted","Data":"4941ccdf6c9de510984ed6d0eaac64c64f0bb8acf8c156582796e2d0718523a5"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.055721 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" podStartSLOduration=126.055705533 podStartE2EDuration="2m6.055705533s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.054107317 +0000 UTC m=+145.828381124" watchObservedRunningTime="2025-10-11 06:56:02.055705533 +0000 UTC m=+145.829979340" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.057685 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-b9tgz" event={"ID":"c2809714-7dec-4c09-a838-6537ff52d750","Type":"ContainerStarted","Data":"f80bbde2a54a88ec0bd3c3623f5cec3bed4878aa21d55d7c8d2f5f5f37d43a5c"} Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.060422 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:56:02 crc kubenswrapper[5055]: 
I1011 06:56:02.060860 5055 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx6cb container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.060908 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.060996 5055 patch_prober.go:28] interesting pod/console-operator-58897d9998-4jldr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.061054 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4jldr" podUID="499f5ce1-e32c-477b-9afc-b3c63ee6b55a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.073878 5055 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-qmgrs container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" start-of-body= Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.073950 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" podUID="5cc43413-bfc7-4cc2-984a-b6843e6cf829" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.076823 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" podStartSLOduration=126.07679525 podStartE2EDuration="2m6.07679525s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.075115471 +0000 UTC m=+145.849389278" watchObservedRunningTime="2025-10-11 06:56:02.07679525 +0000 UTC m=+145.851069057" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.100977 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-d6kpg" podStartSLOduration=126.100960916 podStartE2EDuration="2m6.100960916s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.100291493 +0000 UTC m=+145.874565300" watchObservedRunningTime="2025-10-11 06:56:02.100960916 +0000 UTC m=+145.875234723" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.125256 5055 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-v6lr4" podStartSLOduration=7.125231576 podStartE2EDuration="7.125231576s" podCreationTimestamp="2025-10-11 06:55:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.123965911 +0000 UTC m=+145.898239718" watchObservedRunningTime="2025-10-11 06:56:02.125231576 +0000 UTC m=+145.899505393" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.152981 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.166628 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-b9tgz" podStartSLOduration=7.166607594 podStartE2EDuration="7.166607594s" podCreationTimestamp="2025-10-11 06:55:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:02.152176849 +0000 UTC m=+145.926450666" watchObservedRunningTime="2025-10-11 06:56:02.166607594 +0000 UTC m=+145.940881401" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.169738 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.669710783 +0000 UTC m=+146.443984590 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.280639 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.281058 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.78104229 +0000 UTC m=+146.555316097 (durationBeforeRetry 500ms). 
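[annotation] The pod_startup_latency_tracker entries are simple elapsed-time arithmetic: the logged podStartSLOduration matches watchObservedRunningTime minus podCreationTimestamp (and equals podStartE2EDuration here because the image-pull timestamps are zero). For machine-config-server-v6lr4 above: created 06:55:55, watched running 06:56:02.125231576, so 7.125231576s. Checking the numbers:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

        created, err := time.Parse(layout, "2025-10-11 06:55:55 +0000 UTC")
        if err != nil {
            panic(err)
        }
        watched, err := time.Parse(layout, "2025-10-11 06:56:02.125231576 +0000 UTC")
        if err != nil {
            panic(err)
        }

        // podStartSLOduration in the log is the elapsed time between pod
        // creation and the observation of the pod running.
        fmt.Println(watched.Sub(created)) // 7.125231576s
    }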
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.376535 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:02 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:02 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:02 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.376934 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.382803 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.383020 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.882976008 +0000 UTC m=+146.657249815 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.383262 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.383636 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.883622891 +0000 UTC m=+146.657896698 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.422221 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.422282 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.484438 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.484845 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:02.984831584 +0000 UTC m=+146.759105391 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.586209 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.586607 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.086589046 +0000 UTC m=+146.860862863 (durationBeforeRetry 500ms). 
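[annotation] The Startup, Readiness, and Liveness failures interleaved through this window all have the same shape: the prober issues an HTTP GET against the container's health endpoint, and the endpoint either refuses the connection (the process is not listening yet, as with the machine-config-daemon and router just above) or answers non-2xx (the router's 500 with "[-]has-synced failed"). A minimal probe in the same spirit; this is a hypothetical helper, not the kubelet's prober:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe performs one HTTP health check: any connection error or a status
    // outside the 2xx/3xx range counts as a failure, as in the kubelet.
    func probe(url string) error {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return fmt.Errorf("probe failed: %w", err) // e.g. connection refused
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        // Nothing listens on this port, so this mirrors the
        // "dial tcp ... connect: connection refused" lines in the log.
        if err := probe("http://localhost:1936/healthz/ready"); err != nil {
            fmt.Println(err)
        }
    }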
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.687034 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.687226 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.187201798 +0000 UTC m=+146.961475605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.687370 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.687778 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.187746727 +0000 UTC m=+146.962020594 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.788212 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.788787 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.288742272 +0000 UTC m=+147.063016119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.890367 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.890960 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.390924849 +0000 UTC m=+147.165198716 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.995451 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.995512 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.49549558 +0000 UTC m=+147.269769377 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: E1011 06:56:02.996014 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.496005697 +0000 UTC m=+147.270279504 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:02 crc kubenswrapper[5055]: I1011 06:56:02.995755 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.062732 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" event={"ID":"55f7075b-3a94-416c-830d-dab8afe9e6e5","Type":"ContainerStarted","Data":"48c9bc00e9ff60bf8a0a64ac6699a206fe46e525445ffc9d3aefcbbc52dbbc62"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.062826 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.064173 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" event={"ID":"47e97c97-ef9c-4a82-9530-33b9a82b34a7","Type":"ContainerStarted","Data":"803d9f10959b73d0fb44baef0db37ba673592512cf8275466bc860c6164461b2"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.064300 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.066167 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" event={"ID":"c2ee355c-7c34-4626-aac3-99d9db842b7e","Type":"ContainerStarted","Data":"033b2298649243e9f8619e201a4f44a086d77543d8cc6242d515c0786a0d6bed"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.068482 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" event={"ID":"7a7df9e2-1bec-43f6-9459-71b36b81372f","Type":"ContainerStarted","Data":"c4e1673280fde2b4e40438b74db977df517aa96052a2f7cb6fe0e91877076c30"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.068549 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" event={"ID":"7a7df9e2-1bec-43f6-9459-71b36b81372f","Type":"ContainerStarted","Data":"26736449613b6560cd27594c4275e39e170f66be47dd3f1180b6ad6e5a0cf5f2"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.070471 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" event={"ID":"3a6c6b7a-2e81-450b-9ee6-03393169141f","Type":"ContainerStarted","Data":"593c361ba843d330aeae455c27632eff9309137adce6758744f0726aec6a51ff"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.071974 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rdsf5" 
event={"ID":"9b7663d8-9252-4e52-aeb9-f5a5c5f6ffc1","Type":"ContainerStarted","Data":"f485637b691e9d6bd16b75abfc2689f631f0ceaae6bb9b2c124ef4e5a0b3e0f5"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.072125 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-rdsf5" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.073104 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" event={"ID":"f480d3b1-963f-4deb-8324-01262b8e78ca","Type":"ContainerStarted","Data":"c5eaf80e3b0e332d484c1263a110bf9660db90b29872c75d259a3fcf1085c390"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.077505 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" event={"ID":"6adf2dd2-e065-4164-9097-49bbfbf6c587","Type":"ContainerStarted","Data":"3c9fd9eba08d6428bb66e8f96e80207f0bd0991857a957815e8a7412262c61fb"} Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.077541 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080018 5055 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xgrhb container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080068 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" podUID="03155288-4e58-4224-bdd0-66ab4eddd34e" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080018 5055 patch_prober.go:28] interesting pod/console-operator-58897d9998-4jldr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080124 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-4jldr" podUID="499f5ce1-e32c-477b-9afc-b3c63ee6b55a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080443 5055 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-qmgrs container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" start-of-body= Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080478 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" podUID="5cc43413-bfc7-4cc2-984a-b6843e6cf829" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 
06:56:03.080884 5055 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-fflht container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/healthz\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.080933 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.19:8080/healthz\": dial tcp 10.217.0.19:8080: connect: connection refused" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.091852 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj" podStartSLOduration=127.091824862 podStartE2EDuration="2m7.091824862s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.089866023 +0000 UTC m=+146.864140220" watchObservedRunningTime="2025-10-11 06:56:03.091824862 +0000 UTC m=+146.866098669" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.099162 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.099447 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.599380716 +0000 UTC m=+147.373654523 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.099966 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.100470 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.600460114 +0000 UTC m=+147.374733921 (durationBeforeRetry 500ms). 
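[annotation] The paired reconciler_common.go lines (UnmountVolume for pod 8f668bae-..., MountVolume for pod 7c8fb95a-...) come from the volume manager's reconciler: on each pass it compares the desired state of the world (the new image-registry pod wants the PVC) with the actual state (the terminated pod still holds it) and starts operations for the difference; here both operations keep failing for the same missing-CSI-driver reason. A schematic reconcile loop under that reading, with hypothetical types:

    package main

    import "fmt"

    // reconcile compares desired against actual volume-to-pod assignments and
    // returns the operations to start, loosely mirroring how the kubelet's
    // volume reconciler pairs UnmountVolume and MountVolume work. Schematic.
    func reconcile(desired, actual map[string]string) []string {
        var ops []string
        for vol, pod := range actual {
            if desired[vol] != pod {
                ops = append(ops, "UnmountVolume "+vol+" from pod "+pod)
            }
        }
        for vol, pod := range desired {
            if actual[vol] != pod {
                ops = append(ops, "MountVolume "+vol+" for pod "+pod)
            }
        }
        return ops
    }

    func main() {
        pvc := "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
        actual := map[string]string{pvc: "8f668bae-612b-4b75-9490-919e737c6a3b"}
        desired := map[string]string{pvc: "7c8fb95a-98dc-4592-afe6-195dc531d8df"}
        for _, op := range reconcile(desired, actual) {
            fmt.Println(op) // one Unmount and one Mount, as in the log
        }
    }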
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.112292 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-rdsf5" podStartSLOduration=8.112270957 podStartE2EDuration="8.112270957s" podCreationTimestamp="2025-10-11 06:55:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.108775535 +0000 UTC m=+146.883049342" watchObservedRunningTime="2025-10-11 06:56:03.112270957 +0000 UTC m=+146.886544764" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.150331 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd" podStartSLOduration=127.150307539 podStartE2EDuration="2m7.150307539s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.134749464 +0000 UTC m=+146.909023271" watchObservedRunningTime="2025-10-11 06:56:03.150307539 +0000 UTC m=+146.924581346" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.151643 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-bx7lp" podStartSLOduration=127.151636585 podStartE2EDuration="2m7.151636585s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.149328955 +0000 UTC m=+146.923602772" watchObservedRunningTime="2025-10-11 06:56:03.151636585 +0000 UTC m=+146.925910392" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.174736 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" podStartSLOduration=127.174714533 podStartE2EDuration="2m7.174714533s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.174306649 +0000 UTC m=+146.948580466" watchObservedRunningTime="2025-10-11 06:56:03.174714533 +0000 UTC m=+146.948988340" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.196668 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-rvswx" podStartSLOduration=127.196652071 podStartE2EDuration="2m7.196652071s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.19461472 +0000 UTC m=+146.968888547" watchObservedRunningTime="2025-10-11 06:56:03.196652071 +0000 UTC m=+146.970925878" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.200809 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.200985 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.700959062 +0000 UTC m=+147.475232859 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.201416 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.202916 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.70290452 +0000 UTC m=+147.477178327 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.212825 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-v2zwv" podStartSLOduration=127.212802957 podStartE2EDuration="2m7.212802957s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.211693348 +0000 UTC m=+146.985967155" watchObservedRunningTime="2025-10-11 06:56:03.212802957 +0000 UTC m=+146.987076764" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.228294 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-d5vkx" podStartSLOduration=127.228279658 podStartE2EDuration="2m7.228279658s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.227498891 +0000 UTC m=+147.001772698" watchObservedRunningTime="2025-10-11 06:56:03.228279658 +0000 UTC m=+147.002553465" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.273868 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" podStartSLOduration=127.273852174 podStartE2EDuration="2m7.273852174s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.270846348 +0000 UTC m=+147.045120155" watchObservedRunningTime="2025-10-11 06:56:03.273852174 +0000 UTC m=+147.048125981" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.274342 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-gjjf2" podStartSLOduration=127.274335811 podStartE2EDuration="2m7.274335811s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.255783501 +0000 UTC m=+147.030057318" watchObservedRunningTime="2025-10-11 06:56:03.274335811 +0000 UTC m=+147.048609628" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.301999 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-j9mng" podStartSLOduration=127.301973908 podStartE2EDuration="2m7.301973908s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.299115828 +0000 UTC m=+147.073389655" watchObservedRunningTime="2025-10-11 06:56:03.301973908 +0000 UTC m=+147.076247805" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.302959 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.303238 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.803221892 +0000 UTC m=+147.577495699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.322935 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-lrs96" podStartSLOduration=127.322908931 podStartE2EDuration="2m7.322908931s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.321918106 +0000 UTC m=+147.096191913" watchObservedRunningTime="2025-10-11 06:56:03.322908931 +0000 UTC m=+147.097182748" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.352805 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8tmht" podStartSLOduration=127.352786757 podStartE2EDuration="2m7.352786757s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.350423874 +0000 UTC m=+147.124697681" watchObservedRunningTime="2025-10-11 06:56:03.352786757 +0000 UTC m=+147.127060564" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.359702 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:03 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:03 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:03 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.360129 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.376448 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" podStartSLOduration=127.376428744 podStartE2EDuration="2m7.376428744s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.375945637 +0000 UTC m=+147.150219444" watchObservedRunningTime="2025-10-11 06:56:03.376428744 +0000 UTC m=+147.150702551" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.403427 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-4dmhf" podStartSLOduration=127.403377868 podStartE2EDuration="2m7.403377868s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.402476856 +0000 UTC m=+147.176750673" watchObservedRunningTime="2025-10-11 06:56:03.403377868 +0000 UTC m=+147.177651675" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.404693 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.405256 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:03.905231823 +0000 UTC m=+147.679505630 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.427041 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" podStartSLOduration=127.427023735 podStartE2EDuration="2m7.427023735s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.425424069 +0000 UTC m=+147.199697876" watchObservedRunningTime="2025-10-11 06:56:03.427023735 +0000 UTC m=+147.201297542" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.458377 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-bdszt" podStartSLOduration=127.458356762 podStartE2EDuration="2m7.458356762s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.456390953 +0000 UTC m=+147.230664780" watchObservedRunningTime="2025-10-11 06:56:03.458356762 +0000 UTC m=+147.232630569" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.478952 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-85c46" podStartSLOduration=127.478923542 
podStartE2EDuration="2m7.478923542s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.478024731 +0000 UTC m=+147.252298548" watchObservedRunningTime="2025-10-11 06:56:03.478923542 +0000 UTC m=+147.253197349" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.497472 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-8hdjx" podStartSLOduration=127.49744155 podStartE2EDuration="2m7.49744155s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.495077788 +0000 UTC m=+147.269351605" watchObservedRunningTime="2025-10-11 06:56:03.49744155 +0000 UTC m=+147.271715357" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.513840 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.514087 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.014045682 +0000 UTC m=+147.788319489 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.514602 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.515107 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.015097228 +0000 UTC m=+147.789371035 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.526250 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-ztvqr" podStartSLOduration=127.526230948 podStartE2EDuration="2m7.526230948s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:03.524060772 +0000 UTC m=+147.298334579" watchObservedRunningTime="2025-10-11 06:56:03.526230948 +0000 UTC m=+147.300504745" Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.615258 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.615547 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.115522024 +0000 UTC m=+147.889795831 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.615759 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.616369 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.116343653 +0000 UTC m=+147.890617460 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.717553 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.717740 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.217716681 +0000 UTC m=+147.991990488 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.718129 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.718458 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.218441217 +0000 UTC m=+147.992715024 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.819627 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.819953 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.31993809 +0000 UTC m=+148.094211897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:03 crc kubenswrapper[5055]: I1011 06:56:03.920832 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:03 crc kubenswrapper[5055]: E1011 06:56:03.921477 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.421465734 +0000 UTC m=+148.195739541 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.022791 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.022930 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.522906304 +0000 UTC m=+148.297180112 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.023085 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.023422 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.523413912 +0000 UTC m=+148.297687719 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.080945 5055 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-xgrhb container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.080992 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" podUID="03155288-4e58-4224-bdd0-66ab4eddd34e" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.29:8443/healthz\": dial tcp 10.217.0.29:8443: connect: connection refused" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.120674 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" podStartSLOduration=128.120656996 podStartE2EDuration="2m8.120656996s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:04.118944916 +0000 UTC m=+147.893218733" watchObservedRunningTime="2025-10-11 06:56:04.120656996 +0000 UTC m=+147.894930813" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.124523 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.124720 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.624699468 +0000 UTC m=+148.398973275 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.124794 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.125195 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.625183665 +0000 UTC m=+148.399457552 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.226484 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.226697 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.726669517 +0000 UTC m=+148.500943334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.227064 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.228103 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.728092237 +0000 UTC m=+148.502366264 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.328639 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.328881 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.828851264 +0000 UTC m=+148.603125071 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.329050 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.329354 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.829342241 +0000 UTC m=+148.603616048 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.359495 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:04 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:04 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:04 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.359564 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.430338 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.430594 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.930547554 +0000 UTC m=+148.704821361 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.430933 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.431260 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:04.931243989 +0000 UTC m=+148.705517796 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.531621 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.531775 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.031738546 +0000 UTC m=+148.806012353 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.532195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.532543 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.032535434 +0000 UTC m=+148.806809241 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.633502 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.633700 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.133669174 +0000 UTC m=+148.907942991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.634240 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.634596 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.134585277 +0000 UTC m=+148.908859094 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.735308 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.735587 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.235557621 +0000 UTC m=+149.009831438 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.735674 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.736072 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.236060809 +0000 UTC m=+149.010334676 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.836976 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.837145 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.337111156 +0000 UTC m=+149.111384973 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.837242 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.837284 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.837314 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.837943 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.337920634 +0000 UTC m=+149.112194441 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.842923 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.862069 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.938458 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.938638 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.438612929 +0000 UTC m=+149.212886736 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.938676 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.938721 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.938749 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:04 crc kubenswrapper[5055]: E1011 06:56:04.939039 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.439027654 +0000 UTC m=+149.213301461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.943412 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:56:04 crc kubenswrapper[5055]: I1011 06:56:04.972884 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.007102 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.013739 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.051877 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.052847 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.552826087 +0000 UTC m=+149.327099894 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.116048 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" event={"ID":"b62bbdc8-f6bd-4772-9326-8eae843a6f9f","Type":"ContainerStarted","Data":"02255e50c6f392c8f42a638cddffaa82c08963bedfc37792832b6af5b0b2d0a6"}
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.131072 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.155665 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.156319 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.656300489 +0000 UTC m=+149.430574296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.256952 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.257223 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.757199141 +0000 UTC m=+149.531472948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.257924 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.258316 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.75830077 +0000 UTC m=+149.532574587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.359779 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.360102 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.860082003 +0000 UTC m=+149.634355820 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.364308 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:05 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:05 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:05 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.364353 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.461557 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.461942 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:05.961928498 +0000 UTC m=+149.736202305 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.564535 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.564710 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.064683855 +0000 UTC m=+149.838957662 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.565132 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.565458 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.065443162 +0000 UTC m=+149.839716969 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.667027 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.667441 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.16742325 +0000 UTC m=+149.941697057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.769537 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.769897 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.269884097 +0000 UTC m=+150.044157904 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.811221 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24"
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.870449 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.871118 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.3711038 +0000 UTC m=+150.145377607 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:05 crc kubenswrapper[5055]: I1011 06:56:05.972598 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:05 crc kubenswrapper[5055]: E1011 06:56:05.973089 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.4730775 +0000 UTC m=+150.247351307 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: W1011 06:56:06.051203 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-fb0d453ab5aa2905bd4e9a4c1467635894027929817e23e48f046d8c9834f129 WatchSource:0}: Error finding container fb0d453ab5aa2905bd4e9a4c1467635894027929817e23e48f046d8c9834f129: Status 404 returned error can't find the container with id fb0d453ab5aa2905bd4e9a4c1467635894027929817e23e48f046d8c9834f129
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.079440 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.079691 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.579676712 +0000 UTC m=+150.353950519 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.150416 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fb0d453ab5aa2905bd4e9a4c1467635894027929817e23e48f046d8c9834f129"}
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.165985 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"15fca0a86dcc34f447addba3535c59b1ca98ef3cda2dbcbf97c244311afed8b3"}
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.166046 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"7c3755f92e7bcf59afceaafa4f819e583157465e0204f71dd320a4ab04117b3b"}
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.169156 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"017bbdd4ddb4f7631fada55a2b2b8b47d5c4982af55c33a2987a7270485f12dc"}
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.169220 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"95a7bc54207dc4f3645b486de64610459fd166165aaa591dad1e658bb082fd22"}
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.182803 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.183214 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.683198405 +0000 UTC m=+150.457472212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.284398 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.284584 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.784545093 +0000 UTC m=+150.558818900 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.285075 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.285456 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.785441394 +0000 UTC m=+150.559715201 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.363254 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:06 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:06 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:06 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.363368 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.386164 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.386409 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.886355007 +0000 UTC m=+150.660628814 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.386491 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.386824 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.886811223 +0000 UTC m=+150.661085030 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.487342 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.487548 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.987518208 +0000 UTC m=+150.761792025 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.487903 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.488300 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:06.988289465 +0000 UTC m=+150.762563282 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.589213 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.589521 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.089505398 +0000 UTC m=+150.863779205 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.691324 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.691783 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.191746677 +0000 UTC m=+150.966020484 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.793050 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.793477 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.293446627 +0000 UTC m=+151.067720554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.793621 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.794251 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.294227195 +0000 UTC m=+151.068501002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.894714 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.895308 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.395263761 +0000 UTC m=+151.169537568 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:06 crc kubenswrapper[5055]: I1011 06:56:06.996655 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:06 crc kubenswrapper[5055]: E1011 06:56:06.997103 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.497083906 +0000 UTC m=+151.271357723 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.062137 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.063092 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.066189 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.066480 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.085952 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.158541 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.158736 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.658709953 +0000 UTC m=+151.432983760 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.159002 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.159052 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.159117 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.159468 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.65945858 +0000 UTC m=+151.433732487 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.181229 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e82ee82dcc9000fc3d198d77cb1764c52b8c8acc1bb8f9f64f18b8a3543c46f3"}
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.181386 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.260361 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.260574 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.760543818 +0000 UTC m=+151.534817625 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.260689 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8njcl"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.260693 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.260919 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.260996 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.760980843 +0000 UTC m=+151.535254720 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.261030 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.261188 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.261658 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: W1011 06:56:07.263683 5055 reflector.go:561] object-"openshift-marketplace"/"community-operators-dockercfg-dmngl": failed to list *v1.Secret: secrets "community-operators-dockercfg-dmngl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.263723 5055 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"community-operators-dockercfg-dmngl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"community-operators-dockercfg-dmngl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.291310 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8njcl"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.309364 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.359928 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:07 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:07 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:07 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.359989 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.363032 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.363162 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.863135869 +0000 UTC m=+151.637409676 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.363510 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnqcq\" (UniqueName: \"kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.363654 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.364058 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.864050631 +0000 UTC m=+151.638324438 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.364101 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.364164 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.378019 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.454052 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.455146 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.457246 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.465630 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.465994 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.466043 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.466082 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnqcq\" (UniqueName: \"kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.466472 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:07.966446676 +0000 UTC m=+151.740720483 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.466954 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.467194 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.475088 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.510976 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-gr9mj"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.512481 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnqcq\" (UniqueName: \"kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq\") pod \"community-operators-8njcl\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " pod="openshift-marketplace/community-operators-8njcl"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.568210 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsttl\" (UniqueName: \"kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.568256 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.568288 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.568332 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.568732 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.068720546 +0000 UTC m=+151.842994353 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.642533 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z7b7n"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.643551 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z7b7n"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.649666 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7b7n"]
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669011 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.669233 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.169203373 +0000 UTC m=+151.943477180 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669320 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669433 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsttl\" (UniqueName: \"kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669460 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669510 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669700 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.669725 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.669827 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.169810925 +0000 UTC m=+151.944084742 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.685604 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsttl\" (UniqueName: \"kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl\") pod \"certified-operators-p9vsc\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " pod="openshift-marketplace/certified-operators-p9vsc"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.693976 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Oct 11 06:56:07 crc kubenswrapper[5055]: W1011 06:56:07.704525 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podb1d6bf0a_e29e_4eb5_84af_f13945fef106.slice/crio-8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13 WatchSource:0}: Error finding container 8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13: Status 404 returned error can't find the container with id 8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.733526 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.733588 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.733534 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.734091 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.770007 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.770592 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxnk4\" (UniqueName:
\"kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.770698 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.770724 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.770853 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.270838661 +0000 UTC m=+152.045112468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.780991 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.838249 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.852393 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.853445 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.860561 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.873825 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxnk4\" (UniqueName: \"kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.873883 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.873959 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.873984 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.875713 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.375698832 +0000 UTC m=+152.149972639 (durationBeforeRetry 500ms). 
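[Annotation] Every mount and unmount retry in this stretch fails for the same root cause: the kubelet's CSI code cannot find kubevirt.io.hostpath-provisioner among the plugins registered on this node, because the hostpath provisioner's node plugin has not finished starting (its containers only report ContainerStarted further down, in the csi-hostpathplugin-ft9nj PLEG events). A minimal diagnostic sketch in Go, illustrative only: it reads the node's CSINode object, whose spec lists every CSI driver that has completed registration with this kubelet. The node name "crc" and the default kubeconfig path are assumptions taken from this log's hostname, not values from the log itself.

    // Illustrative sketch (not from the log): check whether the
    // kubevirt.io.hostpath-provisioner node plugin has registered.
    package main

    import (
        "context"
        "fmt"
        "os"
        "path/filepath"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumption: kubeconfig at the conventional default location.
        kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
        cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // The CSINode spec lists each CSI driver whose node plugin has
        // completed registration with this node's kubelet.
        csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        found := false
        for _, d := range csiNode.Spec.Drivers {
            fmt.Println("registered on node:", d.Name)
            if d.Name == "kubevirt.io.hostpath-provisioner" {
                found = true
            }
        }
        fmt.Println("hostpath provisioner registered:", found)
    }

Once the driver appears in that list, the pending MountDevice/TearDown operations for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 can succeed on their next retry.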
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.883917 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.892005 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.892722 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.895594 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.904819 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.905441 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.906782 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxnk4\" (UniqueName: \"kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4\") pod \"community-operators-z7b7n\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.925546 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.925793 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.939040 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.974651 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.975011 5055 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.474990468 +0000 UTC m=+152.249264275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.975425 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.975637 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw8fw\" (UniqueName: \"kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.975697 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:07 crc kubenswrapper[5055]: I1011 06:56:07.975797 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:07 crc kubenswrapper[5055]: E1011 06:56:07.978353 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.478341755 +0000 UTC m=+152.252615562 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.079334 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.079725 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw8fw\" (UniqueName: \"kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.079755 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.079942 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.080412 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.080496 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.58047712 +0000 UTC m=+152.354750927 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.083399 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.120603 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw8fw\" (UniqueName: \"kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw\") pod \"certified-operators-wzpf9\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.124892 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.125631 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.128165 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.131991 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.135893 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.152858 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"] Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.175594 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.181322 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.181640 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.681624741 +0000 UTC m=+152.455898548 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.212117 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-4jldr" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.216271 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerStarted","Data":"d78e3836cb9e0c48faf33db2d298ceb096a42344291be5d5585a19c662adf433"} Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.218539 5055 generic.go:334] "Generic (PLEG): container finished" podID="25f102e3-a97b-4401-a760-98f34d6fe038" containerID="c8abc14887fe6b1e823f609b31de625ff1fe1c99704f169a579d6b80fa424e8a" exitCode=0 Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.218576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" event={"ID":"25f102e3-a97b-4401-a760-98f34d6fe038","Type":"ContainerDied","Data":"c8abc14887fe6b1e823f609b31de625ff1fe1c99704f169a579d6b80fa424e8a"} Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.222027 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b1d6bf0a-e29e-4eb5-84af-f13945fef106","Type":"ContainerStarted","Data":"8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13"} Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.230293 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-d7cr2" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.297951 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.298215 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.298285 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.299131 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.799111904 +0000 UTC m=+152.573385711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.356261 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-bxbhq" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.359135 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:08 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:08 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:08 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.359188 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.399551 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.399903 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.399929 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.400940 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:08.900928248 +0000 UTC m=+152.675202055 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.400978 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.410494 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qmgrs" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.418553 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.428225 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.472719 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.501364 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.501530 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.001508039 +0000 UTC m=+152.775781846 (durationBeforeRetry 500ms). 
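[Annotation] The repeating E-lines come from the kubelet's nestedpendingoperations layer, which enforces a per-volume backoff: after a failed attempt it records a "No retries permitted until ..." deadline (here 500ms out), and the volume reconciler, which re-queues the operation on every sync pass, is rejected until that deadline expires. The following Go sketch is a simplified model of that deadline-gated retry pattern, under stated assumptions; it is not kubelet source, and the 500ms/100ms values are simply the ones visible in these messages.

    // Minimal sketch of a deadline-gated retry: callers that re-queue
    // before the backoff deadline are rejected without re-running the op.
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    type pendingOp struct {
        retryAfter time.Time     // the "No retries permitted until ..." deadline
        backoff    time.Duration // e.g. the 500ms durationBeforeRetry seen here
    }

    var errBackoff = errors.New("operation is already in backoff")

    func (p *pendingOp) run(op func() error) error {
        if time.Now().Before(p.retryAfter) {
            return errBackoff // analogous to the repeated E-lines above
        }
        if err := op(); err != nil {
            p.retryAfter = time.Now().Add(p.backoff)
            return err
        }
        return nil
    }

    func main() {
        p := &pendingOp{backoff: 500 * time.Millisecond}
        mount := func() error { return errors.New("driver not registered") }
        for i := 0; i < 5; i++ {
            fmt.Println(p.run(mount))
            time.Sleep(100 * time.Millisecond) // reconciler-style re-queue
        }
    }

This is why the same failure prints several times per second even though each message promises a 500ms pause: the reconciler keeps asking, and most attempts are turned away at the deadline check rather than reaching the CSI driver at all.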
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.501709 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.502699 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.00268028 +0000 UTC m=+152.776954177 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.532665 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.541528 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.545632 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.549916 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.602467 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.602703 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.1026851 +0000 UTC m=+152.876958907 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.603176 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.603494 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.103486238 +0000 UTC m=+152.877760045 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.704500 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.704935 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.204920209 +0000 UTC m=+152.979194016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.712645 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.783188 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.790930 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-gs8q9" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.804333 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-xgrhb" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.815831 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.816709 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.819516 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.819952 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.319938675 +0000 UTC m=+153.094212492 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.820917 5055 patch_prober.go:28] interesting pod/console-f9d7485db-gjjf2 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body= Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.820948 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-gjjf2" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused" Oct 11 06:56:08 crc kubenswrapper[5055]: I1011 06:56:08.922433 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:08 crc kubenswrapper[5055]: E1011 06:56:08.924220 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.424183715 +0000 UTC m=+153.198457522 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.003321 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z7b7n"] Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.024786 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.025220 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.525196421 +0000 UTC m=+153.299470228 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.077870 5055 patch_prober.go:28] interesting pod/apiserver-76f77b778f-t28vk container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]log ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]etcd ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/start-apiserver-admission-initializer ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/generic-apiserver-start-informers ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/max-in-flight-filter ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/storage-object-count-tracker-hook ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/image.openshift.io-apiserver-caches ok Oct 11 06:56:09 crc kubenswrapper[5055]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Oct 11 06:56:09 crc kubenswrapper[5055]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/project.openshift.io-projectcache ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/openshift.io-startinformers ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/openshift.io-restmapperupdater ok Oct 11 06:56:09 crc kubenswrapper[5055]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Oct 11 06:56:09 crc kubenswrapper[5055]: livez check failed Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.077964 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-t28vk" podUID="7a7df9e2-1bec-43f6-9459-71b36b81372f" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.087623 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8njcl"] Oct 11 06:56:09 crc kubenswrapper[5055]: W1011 06:56:09.104847 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode917fbc6_7142_47bd_bd50_43d15fc8376c.slice/crio-30b80c68fdba54ccd42b528a1493282265cfbd4a6f9756d6a63c2fbececa5ee3 WatchSource:0}: Error finding container 30b80c68fdba54ccd42b528a1493282265cfbd4a6f9756d6a63c2fbececa5ee3: Status 404 returned error can't find the container with id 30b80c68fdba54ccd42b528a1493282265cfbd4a6f9756d6a63c2fbececa5ee3 Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.125498 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.125969 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.625951208 +0000 UTC m=+153.400225015 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.229468 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.229704 5055 generic.go:334] "Generic (PLEG): container finished" podID="b1d6bf0a-e29e-4eb5-84af-f13945fef106" containerID="938cbcca10e8de28e3d14998600e23791277f0c474be8df75ce06a188d651655" exitCode=0 Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.229804 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.729790402 +0000 UTC m=+153.504064209 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.229940 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b1d6bf0a-e29e-4eb5-84af-f13945fef106","Type":"ContainerDied","Data":"938cbcca10e8de28e3d14998600e23791277f0c474be8df75ce06a188d651655"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.232212 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"090781c6-fd6b-4385-adce-a13671655e1b","Type":"ContainerStarted","Data":"3e3cf00a66ddda0c18eff1b48561fa8be164da0c7d2b27c59c8712785f5756ab"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.233976 5055 generic.go:334] "Generic (PLEG): container finished" podID="a4be1796-a941-4648-b20d-abe63dab37fd" containerID="a5ed70aa31b6f53d6ab53070addfc5a81d7882c20784132274ecf0593a2d501f" exitCode=0 Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.234084 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerDied","Data":"a5ed70aa31b6f53d6ab53070addfc5a81d7882c20784132274ecf0593a2d501f"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.234135 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerStarted","Data":"d577f399587408ddcec56d8b3470c887d8a2998a98df9cf5f2f4659bce91dfd7"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.235350 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.236936 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" event={"ID":"b62bbdc8-f6bd-4772-9326-8eae843a6f9f","Type":"ContainerStarted","Data":"c690600b797a361ed019137d9b0e8f893206afa7bf7b232aea20414b7918b2d4"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.236973 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" event={"ID":"b62bbdc8-f6bd-4772-9326-8eae843a6f9f","Type":"ContainerStarted","Data":"de6f2f638895ae960abfc750af1337b927422aa59ac57c09074ff247876cf2a8"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.239100 5055 generic.go:334] "Generic (PLEG): container finished" podID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerID="5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0" exitCode=0 Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.239165 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerDied","Data":"5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.241223 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerStarted","Data":"30b80c68fdba54ccd42b528a1493282265cfbd4a6f9756d6a63c2fbececa5ee3"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.244361 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerStarted","Data":"a7aeb421f899574b912b3422b758ea44c6bd2a13cfe518eda16579d3763f8334"} Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.250439 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.251517 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.257475 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.288813 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.330092 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.330350 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.83030939 +0000 UTC m=+153.604583197 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.330496 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.330614 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.330713 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmmf4\" (UniqueName: \"kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.330740 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.331196 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.831184611 +0000 UTC m=+153.605458608 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.359514 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:09 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:09 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:09 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.359593 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.431857 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.432266 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.932243688 +0000 UTC m=+153.706517495 (durationBeforeRetry 500ms). 
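Note: the router startup-probe output above follows the usual Kubernetes healthz convention: each named check is reported as [+]... ok or [-]... failed ("reason withheld"), and any failing check turns the endpoint into an HTTP 500, which is what the prober records. A self-contained Go sketch of that aggregation style, with hypothetical checks named to match the log (backend-http, has-synced, process-running; this is not the actual openshift-router handler):

package main

import (
	"fmt"
	"log"
	"net/http"
)

type check struct {
	name string
	fn   func() error
}

// healthz renders the [+]/[-] report seen in the probe output and returns
// 500 if any check fails, 200 otherwise.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if err := c.fn(); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError)
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	checks := []check{
		{"backend-http", func() error { return fmt.Errorf("not ready") }},
		{"has-synced", func() error { return fmt.Errorf("not ready") }},
		{"process-running", func() error { return nil }},
	}
	http.HandleFunc("/healthz", healthz(checks))
	log.Fatal(http.ListenAndServe(":1936", nil)) // port is illustrative
}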
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.432308 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.432359 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.432408 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmmf4\" (UniqueName: \"kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.432432 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.432914 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.433151 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:09.93314307 +0000 UTC m=+153.707416877 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.433534 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.454492 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmmf4\" (UniqueName: \"kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4\") pod \"redhat-marketplace-4rsr9\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.533876 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.533976 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.033961029 +0000 UTC m=+153.808234836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.534201 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.534464 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.034458326 +0000 UTC m=+153.808732133 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.535913 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.578813 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.636473 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume\") pod \"25f102e3-a97b-4401-a760-98f34d6fe038\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.636575 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume\") pod \"25f102e3-a97b-4401-a760-98f34d6fe038\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.636619 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mq2sv\" (UniqueName: \"kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv\") pod \"25f102e3-a97b-4401-a760-98f34d6fe038\" (UID: \"25f102e3-a97b-4401-a760-98f34d6fe038\") " Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.637016 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.637676 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.137426061 +0000 UTC m=+153.911699858 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.637748 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume" (OuterVolumeSpecName: "config-volume") pod "25f102e3-a97b-4401-a760-98f34d6fe038" (UID: "25f102e3-a97b-4401-a760-98f34d6fe038"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.641691 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv" (OuterVolumeSpecName: "kube-api-access-mq2sv") pod "25f102e3-a97b-4401-a760-98f34d6fe038" (UID: "25f102e3-a97b-4401-a760-98f34d6fe038"). InnerVolumeSpecName "kube-api-access-mq2sv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.642149 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "25f102e3-a97b-4401-a760-98f34d6fe038" (UID: "25f102e3-a97b-4401-a760-98f34d6fe038"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.644407 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.644626 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f102e3-a97b-4401-a760-98f34d6fe038" containerName="collect-profiles" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.644638 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f102e3-a97b-4401-a760-98f34d6fe038" containerName="collect-profiles" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.644748 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f102e3-a97b-4401-a760-98f34d6fe038" containerName="collect-profiles" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.649238 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.652700 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.738824 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.738881 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.738907 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gntzf\" (UniqueName: \"kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.738965 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.739012 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/25f102e3-a97b-4401-a760-98f34d6fe038-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.739024 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/25f102e3-a97b-4401-a760-98f34d6fe038-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.739033 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mq2sv\" (UniqueName: \"kubernetes.io/projected/25f102e3-a97b-4401-a760-98f34d6fe038-kube-api-access-mq2sv\") on node \"crc\" DevicePath \"\"" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.739140 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.239127401 +0000 UTC m=+154.013401208 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.769794 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.840148 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.840365 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.840419 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.340393416 +0000 UTC m=+154.114667223 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.840550 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.840629 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.840654 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gntzf\" (UniqueName: \"kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.841532 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.341515335 +0000 UTC m=+154.115789142 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.941568 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.941736 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.441702442 +0000 UTC m=+154.215976269 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:09 crc kubenswrapper[5055]: I1011 06:56:09.942042 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:09 crc kubenswrapper[5055]: E1011 06:56:09.942462 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.442451118 +0000 UTC m=+154.216724935 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.042874 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.043030 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.543003488 +0000 UTC m=+154.317277295 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.043182 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.043521 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.543513086 +0000 UTC m=+154.317786893 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.084787 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.084824 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.088742 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gntzf\" (UniqueName: \"kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf\") pod \"redhat-marketplace-6vv9z\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.144403 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.144535 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2025-10-11 06:56:10.644514872 +0000 UTC m=+154.418788689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.144686 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.145038 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.64502717 +0000 UTC m=+154.419300977 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.245272 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.245445 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.745419564 +0000 UTC m=+154.519693381 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.245582 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.245953 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.745943472 +0000 UTC m=+154.520217279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.258827 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" event={"ID":"25f102e3-a97b-4401-a760-98f34d6fe038","Type":"ContainerDied","Data":"ad9b912a8ca25439831c44580a37b7557f5a87a37d70439f3ff5ddbc32cd1fd0"} Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.258880 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad9b912a8ca25439831c44580a37b7557f5a87a37d70439f3ff5ddbc32cd1fd0" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.258847 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.261812 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerDied","Data":"c27ead34ccc12d24a85d12684b47cbb7856c69feba93866df366f930c7506a80"} Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.261726 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerID="c27ead34ccc12d24a85d12684b47cbb7856c69feba93866df366f930c7506a80" exitCode=0 Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.263334 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerStarted","Data":"a1335831eea0e4827e83c98f2171472e1c3a9be30abd9d237ea9f5e0d552a22d"} Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.266498 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerStarted","Data":"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6"} Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.271823 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.346382 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.346516 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.846485982 +0000 UTC m=+154.620759789 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.346862 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.348297 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-11 06:56:10.848279725 +0000 UTC m=+154.622553622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.362490 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 11 06:56:10 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld Oct 11 06:56:10 crc kubenswrapper[5055]: [+]process-running ok Oct 11 06:56:10 crc kubenswrapper[5055]: healthz check failed Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.362578 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.448347 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.448634 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:10.948619187 +0000 UTC m=+154.722892994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.510659 5055 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.517156 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.550595 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.050583876 +0000 UTC m=+154.824857683 (durationBeforeRetry 500ms). 
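Note: the plugin_watcher line above ("Adding socket path ... /var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock") is the turning point of this excerpt: the hostpath provisioner's node plugin has finally dropped its registration socket into the directory the kubelet watches. A minimal sketch of that directory watch using the third-party github.com/fsnotify/fsnotify package (an assumption for illustration; the kubelet's own watcher differs in detail):

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	const registry = "/var/lib/kubelet/plugins_registry"
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add(registry); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev := <-w.Events:
			// A new *.sock file is a driver announcing itself, e.g.
			// kubevirt.io.hostpath-provisioner-reg.sock in the log.
			if ev.Op&fsnotify.Create != 0 && filepath.Ext(ev.Name) == ".sock" {
				log.Printf("Adding socket path %s to desired state cache", ev.Name)
			}
		case err := <-w.Errors:
			log.Println("watch error:", err)
		}
	}
}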
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.550752 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.650132 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"] Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.650362 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1d6bf0a-e29e-4eb5-84af-f13945fef106" containerName="pruner" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.650377 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1d6bf0a-e29e-4eb5-84af-f13945fef106" containerName="pruner" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.650499 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1d6bf0a-e29e-4eb5-84af-f13945fef106" containerName="pruner" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.651490 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.651746 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.651835 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir\") pod \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.651884 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.151864062 +0000 UTC m=+154.926137869 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.651907 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access\") pod \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\" (UID: \"b1d6bf0a-e29e-4eb5-84af-f13945fef106\") " Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.651906 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b1d6bf0a-e29e-4eb5-84af-f13945fef106" (UID: "b1d6bf0a-e29e-4eb5-84af-f13945fef106"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.652205 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.652252 5055 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.652501 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.152491734 +0000 UTC m=+154.926765541 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.653939 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.658017 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b1d6bf0a-e29e-4eb5-84af-f13945fef106" (UID: "b1d6bf0a-e29e-4eb5-84af-f13945fef106"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.668977 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"] Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.725922 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753537 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.753710 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.253684496 +0000 UTC m=+155.027958303 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753782 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753823 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753854 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nx9l\" (UniqueName: \"kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753911 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.753949 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/b1d6bf0a-e29e-4eb5-84af-f13945fef106-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.754130 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.254121781 +0000 UTC m=+155.028395588 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 11 06:56:10 crc kubenswrapper[5055]: W1011 06:56:10.788692 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48eaf223_88fa_49e9_98e8_c971092d0fd8.slice/crio-e4cb7c9d907943826960204a4bf8fc2c3a57f88d4b1b4cb2bc8fbae95a64cf10 WatchSource:0}: Error finding container e4cb7c9d907943826960204a4bf8fc2c3a57f88d4b1b4cb2bc8fbae95a64cf10: Status 404 returned error can't find the container with id e4cb7c9d907943826960204a4bf8fc2c3a57f88d4b1b4cb2bc8fbae95a64cf10 Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.854793 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.855049 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.355019413 +0000 UTC m=+155.129293220 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855280 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855354 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855437 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nx9l\" (UniqueName: \"kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855522 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.855713 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.355704707 +0000 UTC m=+155.129978514 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855736 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.855986 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.875481 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nx9l\" (UniqueName: \"kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l\") pod \"redhat-operators-5j92b\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.956755 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.956934 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.45690567 +0000 UTC m=+155.231179477 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.957048 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:10 crc kubenswrapper[5055]: E1011 06:56:10.957328 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.457311664 +0000 UTC m=+155.231585461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:10 crc kubenswrapper[5055]: I1011 06:56:10.973710 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5j92b"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.050001 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"]
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.051119 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.058466 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:11 crc kubenswrapper[5055]: E1011 06:56:11.058821 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.558804937 +0000 UTC m=+155.333078744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.066856 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"]
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.160543 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.160720 5055 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-10-11T06:56:10.51068565Z","Handler":null,"Name":""}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.160945 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hvdt\" (UniqueName: \"kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.161051 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.161106 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: E1011 06:56:11.161382 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-11 06:56:11.661368967 +0000 UTC m=+155.435642774 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-svqd7" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.182895 5055 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.182933 5055 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.214219 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"]
Oct 11 06:56:11 crc kubenswrapper[5055]: W1011 06:56:11.243511 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d42319b_9bfe_4681_8e7f_dd085d3aef16.slice/crio-04021612f7af9976b8f93c48f6b108bba6c56c87d42e0f21e46f8f43709816a7 WatchSource:0}: Error finding container 04021612f7af9976b8f93c48f6b108bba6c56c87d42e0f21e46f8f43709816a7: Status 404 returned error can't find the container with id 04021612f7af9976b8f93c48f6b108bba6c56c87d42e0f21e46f8f43709816a7
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.262455 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.262669 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.262702 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hvdt\" (UniqueName: \"kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.262785 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.263173 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.263520 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.268218 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.274437 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerStarted","Data":"04021612f7af9976b8f93c48f6b108bba6c56c87d42e0f21e46f8f43709816a7"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.276378 5055 generic.go:334] "Generic (PLEG): container finished" podID="090781c6-fd6b-4385-adce-a13671655e1b" containerID="4a124e667fdcdda2307094b221f0645ed0538d1bb9dde6837517ddf9f6c642c6" exitCode=0
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.276474 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"090781c6-fd6b-4385-adce-a13671655e1b","Type":"ContainerDied","Data":"4a124e667fdcdda2307094b221f0645ed0538d1bb9dde6837517ddf9f6c642c6"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.279793 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" event={"ID":"b62bbdc8-f6bd-4772-9326-8eae843a6f9f","Type":"ContainerStarted","Data":"8b5081efc551cbfa3866bf760aaf1a2d945a4671638e7d550924f61ac19c16b1"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.281238 5055 generic.go:334] "Generic (PLEG): container finished" podID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerID="69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6" exitCode=0
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.281341 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerDied","Data":"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.284198 5055 generic.go:334] "Generic (PLEG): container finished" podID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerID="f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a" exitCode=0
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.284278 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerDied","Data":"f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.284308 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerStarted","Data":"e4cb7c9d907943826960204a4bf8fc2c3a57f88d4b1b4cb2bc8fbae95a64cf10"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.284969 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hvdt\" (UniqueName: \"kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt\") pod \"redhat-operators-drmwq\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.286474 5055 generic.go:334] "Generic (PLEG): container finished" podID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerID="43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320" exitCode=0
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.286533 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerDied","Data":"43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.291127 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"b1d6bf0a-e29e-4eb5-84af-f13945fef106","Type":"ContainerDied","Data":"8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13"}
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.291170 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e4e2c73b4e0d4fb6f6a438b78afc0a80eb24cf3e4a8c6915047ec3ae6708f13"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.291207 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.325397 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-ft9nj" podStartSLOduration=16.325375728 podStartE2EDuration="16.325375728s" podCreationTimestamp="2025-10-11 06:55:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:11.325057207 +0000 UTC m=+155.099331024" watchObservedRunningTime="2025-10-11 06:56:11.325375728 +0000 UTC m=+155.099649535"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.359505 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:11 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:11 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:11 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.359560 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.363489 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.367596 5055 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.367639 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.371249 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-drmwq"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.393849 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-svqd7\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.445214 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.658179 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"]
Oct 11 06:56:11 crc kubenswrapper[5055]: W1011 06:56:11.678176 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1e9f40e_8f93_4dd9_9c7f_e37175a4d9da.slice/crio-1226e910b7179b27806a4333658958b87cc49b85917f35c870f799eb63bab712 WatchSource:0}: Error finding container 1226e910b7179b27806a4333658958b87cc49b85917f35c870f799eb63bab712: Status 404 returned error can't find the container with id 1226e910b7179b27806a4333658958b87cc49b85917f35c870f799eb63bab712
Oct 11 06:56:11 crc kubenswrapper[5055]: I1011 06:56:11.697516 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"]
Oct 11 06:56:11 crc kubenswrapper[5055]: W1011 06:56:11.704577 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c8fb95a_98dc_4592_afe6_195dc531d8df.slice/crio-f5c23caebbbe90fcaf46dea06582096e4d0738d7e0d460629ced1ef3e18bd67a WatchSource:0}: Error finding container f5c23caebbbe90fcaf46dea06582096e4d0738d7e0d460629ced1ef3e18bd67a: Status 404 returned error can't find the container with id f5c23caebbbe90fcaf46dea06582096e4d0738d7e0d460629ced1ef3e18bd67a
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.298197 5055 generic.go:334] "Generic (PLEG): container finished" podID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerID="eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c" exitCode=0
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.298279 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerDied","Data":"eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c"}
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.301379 5055 generic.go:334] "Generic (PLEG): container finished" podID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerID="71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435" exitCode=0
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.301448 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerDied","Data":"71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435"}
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.301482 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerStarted","Data":"1226e910b7179b27806a4333658958b87cc49b85917f35c870f799eb63bab712"}
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.304481 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" event={"ID":"7c8fb95a-98dc-4592-afe6-195dc531d8df","Type":"ContainerStarted","Data":"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5"}
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.304506 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" event={"ID":"7c8fb95a-98dc-4592-afe6-195dc531d8df","Type":"ContainerStarted","Data":"f5c23caebbbe90fcaf46dea06582096e4d0738d7e0d460629ced1ef3e18bd67a"}
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.359246 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:12 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:12 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:12 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.359299 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.546376 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.680926 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir\") pod \"090781c6-fd6b-4385-adce-a13671655e1b\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") "
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.681012 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access\") pod \"090781c6-fd6b-4385-adce-a13671655e1b\" (UID: \"090781c6-fd6b-4385-adce-a13671655e1b\") "
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.681035 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "090781c6-fd6b-4385-adce-a13671655e1b" (UID: "090781c6-fd6b-4385-adce-a13671655e1b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.681702 5055 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/090781c6-fd6b-4385-adce-a13671655e1b-kubelet-dir\") on node \"crc\" DevicePath \"\""
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.688114 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "090781c6-fd6b-4385-adce-a13671655e1b" (UID: "090781c6-fd6b-4385-adce-a13671655e1b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.782612 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/090781c6-fd6b-4385-adce-a13671655e1b-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.909367 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-t28vk"
Oct 11 06:56:12 crc kubenswrapper[5055]: I1011 06:56:12.913565 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-t28vk"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.024945 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.313209 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"090781c6-fd6b-4385-adce-a13671655e1b","Type":"ContainerDied","Data":"3e3cf00a66ddda0c18eff1b48561fa8be164da0c7d2b27c59c8712785f5756ab"}
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.313243 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.313253 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e3cf00a66ddda0c18eff1b48561fa8be164da0c7d2b27c59c8712785f5756ab"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.313290 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.329944 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" podStartSLOduration=137.329924757 podStartE2EDuration="2m17.329924757s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:13.329870625 +0000 UTC m=+157.104144432" watchObservedRunningTime="2025-10-11 06:56:13.329924757 +0000 UTC m=+157.104198564"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.360375 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:13 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:13 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:13 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.360449 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:13 crc kubenswrapper[5055]: I1011 06:56:13.542434 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-rdsf5"
Oct 11 06:56:14 crc kubenswrapper[5055]: I1011 06:56:14.361941 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:14 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:14 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:14 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:14 crc kubenswrapper[5055]: I1011 06:56:14.362236 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:15 crc kubenswrapper[5055]: I1011 06:56:15.362200 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:15 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:15 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:15 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:15 crc kubenswrapper[5055]: I1011 06:56:15.362276 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:16 crc kubenswrapper[5055]: I1011 06:56:16.359531 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:16 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:16 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:16 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:16 crc kubenswrapper[5055]: I1011 06:56:16.360465 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.358227 5055 patch_prober.go:28] interesting pod/router-default-5444994796-bxbhq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 11 06:56:17 crc kubenswrapper[5055]: [-]has-synced failed: reason withheld
Oct 11 06:56:17 crc kubenswrapper[5055]: [+]process-running ok
Oct 11 06:56:17 crc kubenswrapper[5055]: healthz check failed
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.358298 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bxbhq" podUID="bdcba18d-9992-4e37-9937-16f727b35e14" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.722349 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.722399 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.722408 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:17 crc kubenswrapper[5055]: I1011 06:56:17.722456 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.359280 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-bxbhq"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.361908 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-bxbhq"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.789654 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.797688 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c7861c5b-622e-4cce-9360-be9885299bd4-metrics-certs\") pod \"network-metrics-daemon-glhzm\" (UID: \"c7861c5b-622e-4cce-9360-be9885299bd4\") " pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.816542 5055 patch_prober.go:28] interesting pod/console-f9d7485db-gjjf2 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body=
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.816590 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-gjjf2" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused"
Oct 11 06:56:18 crc kubenswrapper[5055]: I1011 06:56:18.916180 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-glhzm"
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.723118 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.723118 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.723629 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.723675 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-rdv7m"
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.723677 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.724098 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b"} pod="openshift-console/downloads-7954f5f757-rdv7m" containerMessage="Container download-server failed liveness probe, will be restarted"
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.724199 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" containerID="cri-o://66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b" gracePeriod=2
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.724461 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:27 crc kubenswrapper[5055]: I1011 06:56:27.724502 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:28 crc kubenswrapper[5055]: E1011 06:56:28.069631 5055 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8de3d54_de40_4186_be10_f0a394a18830.slice/crio-66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b.scope\": RecentStats: unable to find data in memory cache]"
Oct 11 06:56:28 crc kubenswrapper[5055]: I1011 06:56:28.819502 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-gjjf2"
Oct 11 06:56:28 crc kubenswrapper[5055]: I1011 06:56:28.822419 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-gjjf2"
Oct 11 06:56:29 crc kubenswrapper[5055]: I1011 06:56:29.423415 5055 generic.go:334] "Generic (PLEG): container finished" podID="c8de3d54-de40-4186-be10-f0a394a18830" containerID="66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b" exitCode=0
Oct 11 06:56:29 crc kubenswrapper[5055]: I1011 06:56:29.423490 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdv7m" event={"ID":"c8de3d54-de40-4186-be10-f0a394a18830","Type":"ContainerDied","Data":"66e326178f0a2b114130232d6daa3382f44b8d7c1979b4bbfd070a75bfeace2b"}
Oct 11 06:56:31 crc kubenswrapper[5055]: I1011 06:56:31.450220 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7"
Oct 11 06:56:32 crc kubenswrapper[5055]: I1011 06:56:32.422387 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 06:56:32 crc kubenswrapper[5055]: I1011 06:56:32.422803 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 06:56:33 crc kubenswrapper[5055]: E1011 06:56:33.419482 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 11 06:56:33 crc kubenswrapper[5055]: E1011 06:56:33.419707 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qw8fw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-wzpf9_openshift-marketplace(a4be1796-a941-4648-b20d-abe63dab37fd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:33 crc kubenswrapper[5055]: E1011 06:56:33.420896 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-wzpf9" podUID="a4be1796-a941-4648-b20d-abe63dab37fd"
Oct 11 06:56:34 crc kubenswrapper[5055]: E1011 06:56:34.881322 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-wzpf9" podUID="a4be1796-a941-4648-b20d-abe63dab37fd"
Oct 11 06:56:37 crc kubenswrapper[5055]: I1011 06:56:37.722573 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:37 crc kubenswrapper[5055]: I1011 06:56:37.723199 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:38 crc kubenswrapper[5055]: I1011 06:56:38.501883 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-hk8wd"
Oct 11 06:56:39 crc kubenswrapper[5055]: E1011 06:56:39.973127 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Oct 11 06:56:39 crc kubenswrapper[5055]: E1011 06:56:39.973297 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dnqcq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8njcl_openshift-marketplace(e917fbc6-7142-47bd-bd50-43d15fc8376c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:39 crc kubenswrapper[5055]: E1011 06:56:39.974524 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-8njcl" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c"
Oct 11 06:56:42 crc kubenswrapper[5055]: E1011 06:56:42.782886 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-8njcl" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c"
Oct 11 06:56:43 crc kubenswrapper[5055]: E1011 06:56:43.736618 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Oct 11 06:56:43 crc kubenswrapper[5055]: E1011 06:56:43.737021 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cxnk4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-z7b7n_openshift-marketplace(9cbf151b-75a5-48a9-87e3-6c90e2a897df): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:43 crc kubenswrapper[5055]: E1011 06:56:43.738303 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-z7b7n" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df"
Oct 11 06:56:45 crc kubenswrapper[5055]: I1011 06:56:45.142260 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 11 06:56:45 crc kubenswrapper[5055]: E1011 06:56:45.481697 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 11 06:56:45 crc kubenswrapper[5055]: E1011 06:56:45.481856 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tmmf4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4rsr9_openshift-marketplace(5185075e-f5f6-4244-a81d-6f0943eabaf3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:45 crc kubenswrapper[5055]: E1011 06:56:45.483058 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4rsr9" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3"
Oct 11 06:56:47 crc kubenswrapper[5055]: I1011 06:56:47.723081 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:47 crc kubenswrapper[5055]: I1011 06:56:47.723442 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:49 crc kubenswrapper[5055]: E1011 06:56:49.837215 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4rsr9" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3"
Oct 11 06:56:49 crc kubenswrapper[5055]: E1011 06:56:49.881994 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 11 06:56:49 crc kubenswrapper[5055]: E1011 06:56:49.882364 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7hvdt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-drmwq_openshift-marketplace(a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:49 crc kubenswrapper[5055]: E1011 06:56:49.883604 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-drmwq" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da"
Oct 11 06:56:50 crc kubenswrapper[5055]: I1011 06:56:50.224413 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-glhzm"]
Oct 11 06:56:50 crc kubenswrapper[5055]: W1011 06:56:50.232554 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7861c5b_622e_4cce_9360_be9885299bd4.slice/crio-680cf3d8a31895bd29749ff91febd5846e4af1d85486a114116ef156676dd576 WatchSource:0}: Error finding container 680cf3d8a31895bd29749ff91febd5846e4af1d85486a114116ef156676dd576: Status 404 returned error can't find the container with id 680cf3d8a31895bd29749ff91febd5846e4af1d85486a114116ef156676dd576
Oct 11 06:56:50 crc kubenswrapper[5055]: I1011 06:56:50.531592 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-glhzm" event={"ID":"c7861c5b-622e-4cce-9360-be9885299bd4","Type":"ContainerStarted","Data":"680cf3d8a31895bd29749ff91febd5846e4af1d85486a114116ef156676dd576"}
Oct 11 06:56:50 crc kubenswrapper[5055]: I1011 06:56:50.533964 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdv7m" event={"ID":"c8de3d54-de40-4186-be10-f0a394a18830","Type":"ContainerStarted","Data":"12fc6dcfd0dda8488c24f956532f879e5ee7885b5aa45024f325983289507959"}
Oct 11 06:56:50 crc kubenswrapper[5055]: E1011 06:56:50.536078 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-drmwq" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da"
Oct 11 06:56:50 crc kubenswrapper[5055]: E1011 06:56:50.764137 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 11 06:56:50 crc kubenswrapper[5055]: E1011 06:56:50.764305 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5nx9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5j92b_openshift-marketplace(8d42319b-9bfe-4681-8e7f-dd085d3aef16): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:50 crc kubenswrapper[5055]: E1011 06:56:50.765509 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-5j92b" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16"
Oct 11 06:56:51 crc kubenswrapper[5055]: I1011 06:56:51.540454 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-glhzm" event={"ID":"c7861c5b-622e-4cce-9360-be9885299bd4","Type":"ContainerStarted","Data":"1772d7ba1df5db49073ee6917c80f9d359d35a55a9a9adf5b765600a15e41d5c"}
Oct 11 06:56:51 crc kubenswrapper[5055]: I1011 06:56:51.540817 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-rdv7m"
Oct 11 06:56:51 crc kubenswrapper[5055]: I1011 06:56:51.541521 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:51 crc kubenswrapper[5055]: I1011 06:56:51.541560 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:51 crc kubenswrapper[5055]: E1011 06:56:51.542523 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5j92b" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16"
Oct 11 06:56:52 crc kubenswrapper[5055]: I1011 06:56:52.546354 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-glhzm" event={"ID":"c7861c5b-622e-4cce-9360-be9885299bd4","Type":"ContainerStarted","Data":"ce8ff8c5e61aa94783fef2b8d654ccab6432e6c457b6bbbd0c0bf1c3823b493f"}
Oct 11 06:56:52 crc kubenswrapper[5055]: I1011 06:56:52.546886 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Oct 11 06:56:52 crc kubenswrapper[5055]: I1011 06:56:52.546950 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused"
Oct 11 06:56:53 crc kubenswrapper[5055]: E1011 06:56:53.164564 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 11 06:56:53 crc kubenswrapper[5055]: E1011 06:56:53.164727 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gntzf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-6vv9z_openshift-marketplace(48eaf223-88fa-49e9-98e8-c971092d0fd8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 11 06:56:53 crc kubenswrapper[5055]: E1011 06:56:53.165998 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-6vv9z" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8"
Oct 11 06:56:53 crc kubenswrapper[5055]: E1011 06:56:53.552945 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-6vv9z" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8"
Oct 11 06:56:53 crc kubenswrapper[5055]: I1011 06:56:53.565233 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-glhzm" podStartSLOduration=177.565214446 podStartE2EDuration="2m57.565214446s" podCreationTimestamp="2025-10-11 06:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:56:53.564128953 +0000 UTC m=+197.338402780" watchObservedRunningTime="2025-10-11 06:56:53.565214446 +0000 UTC m=+197.339488253"
Oct 11 06:56:54 crc kubenswrapper[5055]: E1011 06:56:54.529945 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 11 06:56:54 crc kubenswrapper[5055]: E1011 06:56:54.530163 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fsttl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-p9vsc_openshift-marketplace(4a62c31d-0b47-44e5-992b-7c9f99997384): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 06:56:54 crc kubenswrapper[5055]: E1011 06:56:54.531585 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-p9vsc" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" Oct 11 06:56:54 crc kubenswrapper[5055]: E1011 06:56:54.556683 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-p9vsc" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" Oct 11 06:56:57 crc kubenswrapper[5055]: I1011 06:56:57.722377 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Oct 11 06:56:57 crc kubenswrapper[5055]: I1011 06:56:57.723970 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" Oct 11 06:56:57 crc kubenswrapper[5055]: I1011 06:56:57.722634 5055 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdv7m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Oct 11 06:56:57 crc kubenswrapper[5055]: I1011 06:56:57.724150 5055 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdv7m" podUID="c8de3d54-de40-4186-be10-f0a394a18830" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.36:8080/\": dial tcp 10.217.0.36:8080: connect: connection refused" Oct 11 06:57:02 crc kubenswrapper[5055]: I1011 06:57:02.422717 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 06:57:02 crc kubenswrapper[5055]: I1011 06:57:02.423144 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 06:57:02 crc kubenswrapper[5055]: I1011 06:57:02.423199 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 06:57:02 crc kubenswrapper[5055]: I1011 06:57:02.423841 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 06:57:02 crc kubenswrapper[5055]: I1011 06:57:02.423912 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e" gracePeriod=600 Oct 11 06:57:03 crc kubenswrapper[5055]: I1011 06:57:03.602208 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e" exitCode=0 Oct 11 06:57:03 crc kubenswrapper[5055]: I1011 06:57:03.602259 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e"} Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.623330 5055 generic.go:334] "Generic (PLEG): container finished" podID="a4be1796-a941-4648-b20d-abe63dab37fd" containerID="52bb64eb530d5ce2c853be264a3c63b4c485212035a9761c80e4ff489fb74b1f" exitCode=0 Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.623425 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerDied","Data":"52bb64eb530d5ce2c853be264a3c63b4c485212035a9761c80e4ff489fb74b1f"} Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.626847 5055 generic.go:334] "Generic (PLEG): container finished" podID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerID="c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80" exitCode=0 Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.626909 5055 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerDied","Data":"c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80"} Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.629889 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2"} Oct 11 06:57:07 crc kubenswrapper[5055]: I1011 06:57:07.743169 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-rdv7m" Oct 11 06:57:08 crc kubenswrapper[5055]: I1011 06:57:08.636665 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerID="01eaf1f37c305247cd5f941a29ad4ad7be9deae72c12c67e6ac236c1a5807285" exitCode=0 Oct 11 06:57:08 crc kubenswrapper[5055]: I1011 06:57:08.636734 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerDied","Data":"01eaf1f37c305247cd5f941a29ad4ad7be9deae72c12c67e6ac236c1a5807285"} Oct 11 06:57:08 crc kubenswrapper[5055]: I1011 06:57:08.645100 5055 generic.go:334] "Generic (PLEG): container finished" podID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerID="6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d" exitCode=0 Oct 11 06:57:08 crc kubenswrapper[5055]: I1011 06:57:08.645272 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerDied","Data":"6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d"} Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.653262 5055 generic.go:334] "Generic (PLEG): container finished" podID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerID="d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf" exitCode=0 Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.653351 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerDied","Data":"d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf"} Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.655285 5055 generic.go:334] "Generic (PLEG): container finished" podID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerID="4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b" exitCode=0 Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.655349 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerDied","Data":"4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b"} Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.658238 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerStarted","Data":"9b213d5ea74ed73d2df59d5f6fe0b218b5ff6d47ca9dbaa00d512156d88799bb"} Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.660232 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerStarted","Data":"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e"} Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.685593 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8njcl" podStartSLOduration=5.066105644 podStartE2EDuration="1m2.685578403s" podCreationTimestamp="2025-10-11 06:56:07 +0000 UTC" firstStartedPulling="2025-10-11 06:56:11.282511828 +0000 UTC m=+155.056785635" lastFinishedPulling="2025-10-11 06:57:08.901984587 +0000 UTC m=+212.676258394" observedRunningTime="2025-10-11 06:57:09.685365007 +0000 UTC m=+213.459638814" watchObservedRunningTime="2025-10-11 06:57:09.685578403 +0000 UTC m=+213.459852210" Oct 11 06:57:09 crc kubenswrapper[5055]: I1011 06:57:09.706181 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wzpf9" podStartSLOduration=3.475836506 podStartE2EDuration="1m2.706167596s" podCreationTimestamp="2025-10-11 06:56:07 +0000 UTC" firstStartedPulling="2025-10-11 06:56:09.235091927 +0000 UTC m=+153.009365734" lastFinishedPulling="2025-10-11 06:57:08.465423017 +0000 UTC m=+212.239696824" observedRunningTime="2025-10-11 06:57:09.70228918 +0000 UTC m=+213.476562987" watchObservedRunningTime="2025-10-11 06:57:09.706167596 +0000 UTC m=+213.480441403" Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.674609 5055 generic.go:334] "Generic (PLEG): container finished" podID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerID="a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5" exitCode=0 Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.674689 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerDied","Data":"a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5"} Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.679555 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerStarted","Data":"b0ebf5d1858293bdde1430b04411172cf07e2585c416cc5ed9ce590ad931dae8"} Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.683274 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerStarted","Data":"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040"} Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.685096 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerStarted","Data":"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4"} Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.731375 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-drmwq" podStartSLOduration=1.908480213 podStartE2EDuration="1m0.731358384s" podCreationTimestamp="2025-10-11 06:56:11 +0000 UTC" firstStartedPulling="2025-10-11 06:56:12.302569615 +0000 UTC m=+156.076843422" lastFinishedPulling="2025-10-11 06:57:11.125447776 +0000 UTC m=+214.899721593" observedRunningTime="2025-10-11 06:57:11.722415388 +0000 UTC 
m=+215.496689205" watchObservedRunningTime="2025-10-11 06:57:11.731358384 +0000 UTC m=+215.505632191" Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.775517 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z7b7n" podStartSLOduration=5.146404344 podStartE2EDuration="1m4.775501428s" podCreationTimestamp="2025-10-11 06:56:07 +0000 UTC" firstStartedPulling="2025-10-11 06:56:11.29399379 +0000 UTC m=+155.068267597" lastFinishedPulling="2025-10-11 06:57:10.923090874 +0000 UTC m=+214.697364681" observedRunningTime="2025-10-11 06:57:11.751324399 +0000 UTC m=+215.525598206" watchObservedRunningTime="2025-10-11 06:57:11.775501428 +0000 UTC m=+215.549775235" Oct 11 06:57:11 crc kubenswrapper[5055]: I1011 06:57:11.778504 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4rsr9" podStartSLOduration=3.264442264 podStartE2EDuration="1m2.778493607s" podCreationTimestamp="2025-10-11 06:56:09 +0000 UTC" firstStartedPulling="2025-10-11 06:56:11.288153565 +0000 UTC m=+155.062427372" lastFinishedPulling="2025-10-11 06:57:10.802204908 +0000 UTC m=+214.576478715" observedRunningTime="2025-10-11 06:57:11.775337543 +0000 UTC m=+215.549611360" watchObservedRunningTime="2025-10-11 06:57:11.778493607 +0000 UTC m=+215.552767414" Oct 11 06:57:12 crc kubenswrapper[5055]: I1011 06:57:12.691778 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerStarted","Data":"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da"} Oct 11 06:57:12 crc kubenswrapper[5055]: I1011 06:57:12.696050 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerStarted","Data":"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a"} Oct 11 06:57:12 crc kubenswrapper[5055]: I1011 06:57:12.735599 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5j92b" podStartSLOduration=3.506272128 podStartE2EDuration="1m2.735582925s" podCreationTimestamp="2025-10-11 06:56:10 +0000 UTC" firstStartedPulling="2025-10-11 06:56:12.300120889 +0000 UTC m=+156.074394696" lastFinishedPulling="2025-10-11 06:57:11.529431686 +0000 UTC m=+215.303705493" observedRunningTime="2025-10-11 06:57:12.730844434 +0000 UTC m=+216.505118241" watchObservedRunningTime="2025-10-11 06:57:12.735582925 +0000 UTC m=+216.509856732" Oct 11 06:57:13 crc kubenswrapper[5055]: I1011 06:57:13.708301 5055 generic.go:334] "Generic (PLEG): container finished" podID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerID="66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da" exitCode=0 Oct 11 06:57:13 crc kubenswrapper[5055]: I1011 06:57:13.708358 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerDied","Data":"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da"} Oct 11 06:57:13 crc kubenswrapper[5055]: I1011 06:57:13.711200 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerStarted","Data":"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228"} Oct 11 
06:57:13 crc kubenswrapper[5055]: I1011 06:57:13.747425 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6vv9z" podStartSLOduration=3.399190288 podStartE2EDuration="1m4.747406731s" podCreationTimestamp="2025-10-11 06:56:09 +0000 UTC" firstStartedPulling="2025-10-11 06:56:11.285302345 +0000 UTC m=+155.059576152" lastFinishedPulling="2025-10-11 06:57:12.633518788 +0000 UTC m=+216.407792595" observedRunningTime="2025-10-11 06:57:13.746119753 +0000 UTC m=+217.520393560" watchObservedRunningTime="2025-10-11 06:57:13.747406731 +0000 UTC m=+217.521680538" Oct 11 06:57:15 crc kubenswrapper[5055]: I1011 06:57:15.732810 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerStarted","Data":"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923"} Oct 11 06:57:15 crc kubenswrapper[5055]: I1011 06:57:15.751424 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p9vsc" podStartSLOduration=3.356579371 podStartE2EDuration="1m8.75140825s" podCreationTimestamp="2025-10-11 06:56:07 +0000 UTC" firstStartedPulling="2025-10-11 06:56:09.241620686 +0000 UTC m=+153.015894493" lastFinishedPulling="2025-10-11 06:57:14.636449565 +0000 UTC m=+218.410723372" observedRunningTime="2025-10-11 06:57:15.750968667 +0000 UTC m=+219.525242464" watchObservedRunningTime="2025-10-11 06:57:15.75140825 +0000 UTC m=+219.525682057" Oct 11 06:57:17 crc kubenswrapper[5055]: I1011 06:57:17.782064 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:57:17 crc kubenswrapper[5055]: I1011 06:57:17.782645 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:57:17 crc kubenswrapper[5055]: I1011 06:57:17.931736 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.177047 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.177090 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.225022 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.546254 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.546304 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.550671 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.550727 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.588193 5055 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.594103 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.781281 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.785235 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:18 crc kubenswrapper[5055]: I1011 06:57:18.793465 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:57:19 crc kubenswrapper[5055]: I1011 06:57:19.579557 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:57:19 crc kubenswrapper[5055]: I1011 06:57:19.580062 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:57:19 crc kubenswrapper[5055]: I1011 06:57:19.625972 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:57:19 crc kubenswrapper[5055]: I1011 06:57:19.794398 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:57:19 crc kubenswrapper[5055]: I1011 06:57:19.795258 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.059031 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.272832 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.272900 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.315411 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.755706 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wzpf9" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="registry-server" containerID="cri-o://9b213d5ea74ed73d2df59d5f6fe0b218b5ff6d47ca9dbaa00d512156d88799bb" gracePeriod=2 Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.796510 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.974716 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:57:20 crc kubenswrapper[5055]: I1011 06:57:20.975508 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:57:21 crc kubenswrapper[5055]: 
I1011 06:57:21.024904 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.372923 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.373240 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.412046 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.765934 5055 generic.go:334] "Generic (PLEG): container finished" podID="a4be1796-a941-4648-b20d-abe63dab37fd" containerID="9b213d5ea74ed73d2df59d5f6fe0b218b5ff6d47ca9dbaa00d512156d88799bb" exitCode=0 Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.766105 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerDied","Data":"9b213d5ea74ed73d2df59d5f6fe0b218b5ff6d47ca9dbaa00d512156d88799bb"} Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.811600 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.812051 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.859702 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7b7n"] Oct 11 06:57:21 crc kubenswrapper[5055]: I1011 06:57:21.860073 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z7b7n" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="registry-server" containerID="cri-o://b0ebf5d1858293bdde1430b04411172cf07e2585c416cc5ed9ce590ad931dae8" gracePeriod=2 Oct 11 06:57:22 crc kubenswrapper[5055]: I1011 06:57:22.459306 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:57:22 crc kubenswrapper[5055]: I1011 06:57:22.783167 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerID="b0ebf5d1858293bdde1430b04411172cf07e2585c416cc5ed9ce590ad931dae8" exitCode=0 Oct 11 06:57:22 crc kubenswrapper[5055]: I1011 06:57:22.783335 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerDied","Data":"b0ebf5d1858293bdde1430b04411172cf07e2585c416cc5ed9ce590ad931dae8"} Oct 11 06:57:22 crc kubenswrapper[5055]: I1011 06:57:22.783582 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6vv9z" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="registry-server" containerID="cri-o://c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228" gracePeriod=2 Oct 11 06:57:22 crc kubenswrapper[5055]: I1011 06:57:22.992383 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.118274 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content\") pod \"a4be1796-a941-4648-b20d-abe63dab37fd\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.118554 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qw8fw\" (UniqueName: \"kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw\") pod \"a4be1796-a941-4648-b20d-abe63dab37fd\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.118591 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities\") pod \"a4be1796-a941-4648-b20d-abe63dab37fd\" (UID: \"a4be1796-a941-4648-b20d-abe63dab37fd\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.119493 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities" (OuterVolumeSpecName: "utilities") pod "a4be1796-a941-4648-b20d-abe63dab37fd" (UID: "a4be1796-a941-4648-b20d-abe63dab37fd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.137384 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw" (OuterVolumeSpecName: "kube-api-access-qw8fw") pod "a4be1796-a941-4648-b20d-abe63dab37fd" (UID: "a4be1796-a941-4648-b20d-abe63dab37fd"). InnerVolumeSpecName "kube-api-access-qw8fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.164790 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a4be1796-a941-4648-b20d-abe63dab37fd" (UID: "a4be1796-a941-4648-b20d-abe63dab37fd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.221146 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.221212 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qw8fw\" (UniqueName: \"kubernetes.io/projected/a4be1796-a941-4648-b20d-abe63dab37fd-kube-api-access-qw8fw\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.221240 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4be1796-a941-4648-b20d-abe63dab37fd-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.243547 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.322412 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxnk4\" (UniqueName: \"kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4\") pod \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.322623 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content\") pod \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.322720 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities\") pod \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\" (UID: \"9cbf151b-75a5-48a9-87e3-6c90e2a897df\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.323742 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities" (OuterVolumeSpecName: "utilities") pod "9cbf151b-75a5-48a9-87e3-6c90e2a897df" (UID: "9cbf151b-75a5-48a9-87e3-6c90e2a897df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.325830 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4" (OuterVolumeSpecName: "kube-api-access-cxnk4") pod "9cbf151b-75a5-48a9-87e3-6c90e2a897df" (UID: "9cbf151b-75a5-48a9-87e3-6c90e2a897df"). InnerVolumeSpecName "kube-api-access-cxnk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.375427 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cbf151b-75a5-48a9-87e3-6c90e2a897df" (UID: "9cbf151b-75a5-48a9-87e3-6c90e2a897df"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.424485 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.424523 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cbf151b-75a5-48a9-87e3-6c90e2a897df-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.424533 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxnk4\" (UniqueName: \"kubernetes.io/projected/9cbf151b-75a5-48a9-87e3-6c90e2a897df-kube-api-access-cxnk4\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.573076 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.728147 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content\") pod \"48eaf223-88fa-49e9-98e8-c971092d0fd8\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.728234 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities\") pod \"48eaf223-88fa-49e9-98e8-c971092d0fd8\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.728258 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gntzf\" (UniqueName: \"kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf\") pod \"48eaf223-88fa-49e9-98e8-c971092d0fd8\" (UID: \"48eaf223-88fa-49e9-98e8-c971092d0fd8\") " Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.729227 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities" (OuterVolumeSpecName: "utilities") pod "48eaf223-88fa-49e9-98e8-c971092d0fd8" (UID: "48eaf223-88fa-49e9-98e8-c971092d0fd8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.731561 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf" (OuterVolumeSpecName: "kube-api-access-gntzf") pod "48eaf223-88fa-49e9-98e8-c971092d0fd8" (UID: "48eaf223-88fa-49e9-98e8-c971092d0fd8"). InnerVolumeSpecName "kube-api-access-gntzf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.739971 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "48eaf223-88fa-49e9-98e8-c971092d0fd8" (UID: "48eaf223-88fa-49e9-98e8-c971092d0fd8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.790207 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wzpf9" event={"ID":"a4be1796-a941-4648-b20d-abe63dab37fd","Type":"ContainerDied","Data":"d577f399587408ddcec56d8b3470c887d8a2998a98df9cf5f2f4659bce91dfd7"} Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.790246 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wzpf9" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.790269 5055 scope.go:117] "RemoveContainer" containerID="9b213d5ea74ed73d2df59d5f6fe0b218b5ff6d47ca9dbaa00d512156d88799bb" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.793410 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6vv9z" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.793403 5055 generic.go:334] "Generic (PLEG): container finished" podID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerID="c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228" exitCode=0 Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.793513 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerDied","Data":"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228"} Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.793559 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6vv9z" event={"ID":"48eaf223-88fa-49e9-98e8-c971092d0fd8","Type":"ContainerDied","Data":"e4cb7c9d907943826960204a4bf8fc2c3a57f88d4b1b4cb2bc8fbae95a64cf10"} Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.796088 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z7b7n" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.796099 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z7b7n" event={"ID":"9cbf151b-75a5-48a9-87e3-6c90e2a897df","Type":"ContainerDied","Data":"a7aeb421f899574b912b3422b758ea44c6bd2a13cfe518eda16579d3763f8334"} Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.811664 5055 scope.go:117] "RemoveContainer" containerID="52bb64eb530d5ce2c853be264a3c63b4c485212035a9761c80e4ff489fb74b1f" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.830515 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.830881 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.831122 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48eaf223-88fa-49e9-98e8-c971092d0fd8-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.831135 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gntzf\" (UniqueName: \"kubernetes.io/projected/48eaf223-88fa-49e9-98e8-c971092d0fd8-kube-api-access-gntzf\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.834679 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wzpf9"] Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.847166 5055 scope.go:117] "RemoveContainer" containerID="a5ed70aa31b6f53d6ab53070addfc5a81d7882c20784132274ecf0593a2d501f" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.848908 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.853186 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6vv9z"] Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.863189 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z7b7n"] Oct 11 06:57:23 crc 
kubenswrapper[5055]: I1011 06:57:23.868178 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z7b7n"] Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.880279 5055 scope.go:117] "RemoveContainer" containerID="c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.901412 5055 scope.go:117] "RemoveContainer" containerID="a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.915786 5055 scope.go:117] "RemoveContainer" containerID="f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.928610 5055 scope.go:117] "RemoveContainer" containerID="c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228" Oct 11 06:57:23 crc kubenswrapper[5055]: E1011 06:57:23.929015 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228\": container with ID starting with c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228 not found: ID does not exist" containerID="c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929052 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228"} err="failed to get container status \"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228\": rpc error: code = NotFound desc = could not find container \"c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228\": container with ID starting with c0141be2133a2b6739cc87fe83aa754276060d5eac28be2f53595599f6d35228 not found: ID does not exist" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929080 5055 scope.go:117] "RemoveContainer" containerID="a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5" Oct 11 06:57:23 crc kubenswrapper[5055]: E1011 06:57:23.929439 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5\": container with ID starting with a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5 not found: ID does not exist" containerID="a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929471 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5"} err="failed to get container status \"a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5\": rpc error: code = NotFound desc = could not find container \"a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5\": container with ID starting with a5e1ea3482776996cdc3ae7f4a61f26b17f522fdfda7a0aef9bb2b3a1f756ad5 not found: ID does not exist" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929491 5055 scope.go:117] "RemoveContainer" containerID="f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a" Oct 11 06:57:23 crc kubenswrapper[5055]: E1011 06:57:23.929705 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a\": container with ID starting with f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a not found: ID does not exist" containerID="f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929734 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a"} err="failed to get container status \"f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a\": rpc error: code = NotFound desc = could not find container \"f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a\": container with ID starting with f6d67b41ec59022d7f0a88fbc92c78d536cd1b5fddffca6ad3f9df9fc723ba3a not found: ID does not exist" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.929755 5055 scope.go:117] "RemoveContainer" containerID="b0ebf5d1858293bdde1430b04411172cf07e2585c416cc5ed9ce590ad931dae8" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.943704 5055 scope.go:117] "RemoveContainer" containerID="01eaf1f37c305247cd5f941a29ad4ad7be9deae72c12c67e6ac236c1a5807285" Oct 11 06:57:23 crc kubenswrapper[5055]: I1011 06:57:23.957550 5055 scope.go:117] "RemoveContainer" containerID="c27ead34ccc12d24a85d12684b47cbb7856c69feba93866df366f930c7506a80" Oct 11 06:57:24 crc kubenswrapper[5055]: I1011 06:57:24.858739 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"] Oct 11 06:57:24 crc kubenswrapper[5055]: I1011 06:57:24.859324 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-drmwq" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="registry-server" containerID="cri-o://d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4" gracePeriod=2 Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.000248 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" path="/var/lib/kubelet/pods/48eaf223-88fa-49e9-98e8-c971092d0fd8/volumes" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.001204 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" path="/var/lib/kubelet/pods/9cbf151b-75a5-48a9-87e3-6c90e2a897df/volumes" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.001904 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" path="/var/lib/kubelet/pods/a4be1796-a941-4648-b20d-abe63dab37fd/volumes" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.244902 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.351320 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities\") pod \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.352357 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content\") pod \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.352491 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hvdt\" (UniqueName: \"kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt\") pod \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\" (UID: \"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da\") " Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.352267 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities" (OuterVolumeSpecName: "utilities") pod "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" (UID: "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.357857 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt" (OuterVolumeSpecName: "kube-api-access-7hvdt") pod "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" (UID: "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da"). InnerVolumeSpecName "kube-api-access-7hvdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.427667 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" (UID: "a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.453815 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.454022 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hvdt\" (UniqueName: \"kubernetes.io/projected/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-kube-api-access-7hvdt\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.454098 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.812967 5055 generic.go:334] "Generic (PLEG): container finished" podID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerID="d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4" exitCode=0 Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.813025 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerDied","Data":"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4"} Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.813502 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-drmwq" event={"ID":"a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da","Type":"ContainerDied","Data":"1226e910b7179b27806a4333658958b87cc49b85917f35c870f799eb63bab712"} Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.813104 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-drmwq" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.813563 5055 scope.go:117] "RemoveContainer" containerID="d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.852817 5055 scope.go:117] "RemoveContainer" containerID="4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.858749 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"] Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.864050 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-drmwq"] Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.875035 5055 scope.go:117] "RemoveContainer" containerID="71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.896589 5055 scope.go:117] "RemoveContainer" containerID="d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4" Oct 11 06:57:25 crc kubenswrapper[5055]: E1011 06:57:25.897181 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4\": container with ID starting with d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4 not found: ID does not exist" containerID="d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.897255 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4"} err="failed to get container status \"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4\": rpc error: code = NotFound desc = could not find container \"d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4\": container with ID starting with d654024e5b195e355f1eebc3ec2e9d6e9718aaa93c0a775ec66db3c7e719f2b4 not found: ID does not exist" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.897312 5055 scope.go:117] "RemoveContainer" containerID="4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b" Oct 11 06:57:25 crc kubenswrapper[5055]: E1011 06:57:25.897786 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b\": container with ID starting with 4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b not found: ID does not exist" containerID="4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.897823 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b"} err="failed to get container status \"4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b\": rpc error: code = NotFound desc = could not find container \"4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b\": container with ID starting with 4bdf4916a6718e8735ffb3d8e8e5da77e11ce6328b6c5d84d95b225cd249203b not found: ID does not exist" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.897850 5055 scope.go:117] "RemoveContainer" 
containerID="71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435" Oct 11 06:57:25 crc kubenswrapper[5055]: E1011 06:57:25.898181 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435\": container with ID starting with 71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435 not found: ID does not exist" containerID="71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435" Oct 11 06:57:25 crc kubenswrapper[5055]: I1011 06:57:25.898208 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435"} err="failed to get container status \"71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435\": rpc error: code = NotFound desc = could not find container \"71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435\": container with ID starting with 71b58bb23f3d6bd9fa7e14bc766da1173ab2d8ef15dbe1cb87eb0f6f55517435 not found: ID does not exist" Oct 11 06:57:27 crc kubenswrapper[5055]: I1011 06:57:27.004307 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" path="/var/lib/kubelet/pods/a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da/volumes" Oct 11 06:57:37 crc kubenswrapper[5055]: I1011 06:57:37.381610 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"] Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.423147 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" containerID="cri-o://21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9" gracePeriod=15 Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.833938 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.871175 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-758c4c8f95-82hld"] Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877273 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877310 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877359 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877370 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877543 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877554 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877566 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877575 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877586 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877597 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877610 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877619 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877633 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877641 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877653 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877662 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877674 5055 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877683 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877699 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877707 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877716 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877723 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877737 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877745 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="extract-content" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877755 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877784 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="extract-utilities" Oct 11 06:58:02 crc kubenswrapper[5055]: E1011 06:58:02.877818 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090781c6-fd6b-4385-adce-a13671655e1b" containerName="pruner" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877836 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="090781c6-fd6b-4385-adce-a13671655e1b" containerName="pruner" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877977 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cbf151b-75a5-48a9-87e3-6c90e2a897df" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.877991 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="48eaf223-88fa-49e9-98e8-c971092d0fd8" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.878002 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4be1796-a941-4648-b20d-abe63dab37fd" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.878022 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerName="oauth-openshift" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.878032 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1e9f40e-8f93-4dd9-9c7f-e37175a4d9da" containerName="registry-server" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.878042 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="090781c6-fd6b-4385-adce-a13671655e1b" containerName="pruner" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 
06:58:02.878579 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.881742 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-758c4c8f95-82hld"] Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.936842 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.936915 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.936959 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.936984 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937013 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937045 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937096 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937128 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937177 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937222 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937252 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7x8v\" (UniqueName: \"kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937297 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937325 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.937360 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies\") pod \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\" (UID: \"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497\") " Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.938195 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.938893 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.939367 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.939501 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.939986 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.944568 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.945004 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.945394 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.945465 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.945692 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.945844 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.946022 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.946318 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:02 crc kubenswrapper[5055]: I1011 06:58:02.958234 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v" (OuterVolumeSpecName: "kube-api-access-n7x8v") pod "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" (UID: "b7d98b8e-92a0-4d90-9b8d-5ad1087f7497"). InnerVolumeSpecName "kube-api-access-n7x8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.000113 5055 generic.go:334] "Generic (PLEG): container finished" podID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" containerID="21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9" exitCode=0 Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.000209 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.000781 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" event={"ID":"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497","Type":"ContainerDied","Data":"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9"} Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.000822 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-f5tqq" event={"ID":"b7d98b8e-92a0-4d90-9b8d-5ad1087f7497","Type":"ContainerDied","Data":"f29d8197ba8e988692f296cd630ddfd8c95679ea7df62045f8032d403053a84e"} Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.000839 5055 scope.go:117] "RemoveContainer" containerID="21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.024227 5055 scope.go:117] "RemoveContainer" containerID="21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9" Oct 11 06:58:03 crc kubenswrapper[5055]: E1011 06:58:03.024685 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9\": container with ID starting with 21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9 not found: ID does not exist" containerID="21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.024718 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9"} err="failed to get container status \"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9\": rpc error: code = NotFound desc = could not find container \"21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9\": container with ID starting with 21f5aac3a921c1afc7eb42ac14d98d7ba0312b1bd30527221e0be1e9825bdce9 not found: ID does not exist" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.041494 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-session\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.041621 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-error\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.041685 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-serving-cert\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 
06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.041709 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.041746 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.043546 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"] Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.043631 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-router-certs\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.043699 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.043753 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-cliconfig\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047052 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-dir\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047120 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-login\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047189 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-service-ca\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047262 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047290 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-policies\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047311 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqd8t\" (UniqueName: \"kubernetes.io/projected/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-kube-api-access-bqd8t\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047428 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047441 5055 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047453 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047468 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047479 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047488 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047497 5055 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047510 5055 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-audit-dir\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047519 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047529 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047538 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047551 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047560 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7x8v\" (UniqueName: \"kubernetes.io/projected/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-kube-api-access-n7x8v\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047571 5055 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.047707 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-f5tqq"] Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148061 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-service-ca\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148128 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148158 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-policies\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148181 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqd8t\" (UniqueName: \"kubernetes.io/projected/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-kube-api-access-bqd8t\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148232 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-session\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148273 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-error\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148295 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-serving-cert\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148324 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148349 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148375 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-router-certs\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148401 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148424 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-cliconfig\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148446 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-dir\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148467 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-login\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.148825 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-dir\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.149147 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-audit-policies\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.149285 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.149322 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-cliconfig\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.149696 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-service-ca\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: 
\"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.151866 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-error\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.151873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.151982 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.152736 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.153032 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-user-template-login\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.154007 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-router-certs\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.154217 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-session\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.157670 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-v4-0-config-system-serving-cert\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: 
\"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.169484 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqd8t\" (UniqueName: \"kubernetes.io/projected/61214a58-dce9-4e9b-9f90-7b8a6f8fac81-kube-api-access-bqd8t\") pod \"oauth-openshift-758c4c8f95-82hld\" (UID: \"61214a58-dce9-4e9b-9f90-7b8a6f8fac81\") " pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.199116 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:03 crc kubenswrapper[5055]: I1011 06:58:03.578532 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-758c4c8f95-82hld"] Oct 11 06:58:04 crc kubenswrapper[5055]: I1011 06:58:04.009124 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" event={"ID":"61214a58-dce9-4e9b-9f90-7b8a6f8fac81","Type":"ContainerStarted","Data":"875a63d676dc065a48d5f28dce29ac3c1483ef9f2245c5119a3e168aa3197bf9"} Oct 11 06:58:04 crc kubenswrapper[5055]: I1011 06:58:04.009208 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" event={"ID":"61214a58-dce9-4e9b-9f90-7b8a6f8fac81","Type":"ContainerStarted","Data":"b495e2279e0d69fb9d7b01f6a25996a1b4f2ff147b40f662d972a12cfed10ad5"} Oct 11 06:58:04 crc kubenswrapper[5055]: I1011 06:58:04.009337 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:04 crc kubenswrapper[5055]: I1011 06:58:04.033782 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" Oct 11 06:58:04 crc kubenswrapper[5055]: I1011 06:58:04.034022 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-758c4c8f95-82hld" podStartSLOduration=27.033993616 podStartE2EDuration="27.033993616s" podCreationTimestamp="2025-10-11 06:57:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:58:04.032432889 +0000 UTC m=+267.806706696" watchObservedRunningTime="2025-10-11 06:58:04.033993616 +0000 UTC m=+267.808267423" Oct 11 06:58:05 crc kubenswrapper[5055]: I1011 06:58:05.000108 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d98b8e-92a0-4d90-9b8d-5ad1087f7497" path="/var/lib/kubelet/pods/b7d98b8e-92a0-4d90-9b8d-5ad1087f7497/volumes" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.135430 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.136042 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p9vsc" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="registry-server" containerID="cri-o://3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923" gracePeriod=30 Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.146444 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8njcl"] Oct 11 06:58:35 crc 
kubenswrapper[5055]: I1011 06:58:35.146742 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8njcl" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="registry-server" containerID="cri-o://37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e" gracePeriod=30 Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.159903 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.160638 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" containerID="cri-o://b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5" gracePeriod=30 Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.173035 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.173271 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4rsr9" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="registry-server" containerID="cri-o://d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040" gracePeriod=30 Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.181881 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.182740 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.191817 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.192067 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5j92b" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="registry-server" containerID="cri-o://70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a" gracePeriod=30 Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.195622 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.238207 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpg7x\" (UniqueName: \"kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.238350 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.238378 5055 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.339580 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.339638 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.339673 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpg7x\" (UniqueName: \"kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.341419 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.352275 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.369909 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpg7x\" (UniqueName: \"kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x\") pod \"marketplace-operator-79b997595-slk95\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.585355 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.587362 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.616232 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.629020 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.637059 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642662 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content\") pod \"4a62c31d-0b47-44e5-992b-7c9f99997384\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642695 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content\") pod \"5185075e-f5f6-4244-a81d-6f0943eabaf3\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642724 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca\") pod \"be40637a-a178-4125-958b-c1bc36dee57d\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642746 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities\") pod \"e917fbc6-7142-47bd-bd50-43d15fc8376c\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642781 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmmf4\" (UniqueName: \"kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4\") pod \"5185075e-f5f6-4244-a81d-6f0943eabaf3\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642797 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities\") pod \"4a62c31d-0b47-44e5-992b-7c9f99997384\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642817 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnqcq\" (UniqueName: \"kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq\") pod \"e917fbc6-7142-47bd-bd50-43d15fc8376c\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642842 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics\") pod \"be40637a-a178-4125-958b-c1bc36dee57d\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642865 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsttl\" (UniqueName: 
\"kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl\") pod \"4a62c31d-0b47-44e5-992b-7c9f99997384\" (UID: \"4a62c31d-0b47-44e5-992b-7c9f99997384\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642884 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content\") pod \"e917fbc6-7142-47bd-bd50-43d15fc8376c\" (UID: \"e917fbc6-7142-47bd-bd50-43d15fc8376c\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642899 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kft9\" (UniqueName: \"kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9\") pod \"be40637a-a178-4125-958b-c1bc36dee57d\" (UID: \"be40637a-a178-4125-958b-c1bc36dee57d\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.642913 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities\") pod \"5185075e-f5f6-4244-a81d-6f0943eabaf3\" (UID: \"5185075e-f5f6-4244-a81d-6f0943eabaf3\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.643706 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities" (OuterVolumeSpecName: "utilities") pod "e917fbc6-7142-47bd-bd50-43d15fc8376c" (UID: "e917fbc6-7142-47bd-bd50-43d15fc8376c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.644380 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "be40637a-a178-4125-958b-c1bc36dee57d" (UID: "be40637a-a178-4125-958b-c1bc36dee57d"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.645080 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities" (OuterVolumeSpecName: "utilities") pod "5185075e-f5f6-4244-a81d-6f0943eabaf3" (UID: "5185075e-f5f6-4244-a81d-6f0943eabaf3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.647905 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities" (OuterVolumeSpecName: "utilities") pod "4a62c31d-0b47-44e5-992b-7c9f99997384" (UID: "4a62c31d-0b47-44e5-992b-7c9f99997384"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.648661 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4" (OuterVolumeSpecName: "kube-api-access-tmmf4") pod "5185075e-f5f6-4244-a81d-6f0943eabaf3" (UID: "5185075e-f5f6-4244-a81d-6f0943eabaf3"). InnerVolumeSpecName "kube-api-access-tmmf4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.649184 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9" (OuterVolumeSpecName: "kube-api-access-2kft9") pod "be40637a-a178-4125-958b-c1bc36dee57d" (UID: "be40637a-a178-4125-958b-c1bc36dee57d"). InnerVolumeSpecName "kube-api-access-2kft9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.651028 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "be40637a-a178-4125-958b-c1bc36dee57d" (UID: "be40637a-a178-4125-958b-c1bc36dee57d"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.651206 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl" (OuterVolumeSpecName: "kube-api-access-fsttl") pod "4a62c31d-0b47-44e5-992b-7c9f99997384" (UID: "4a62c31d-0b47-44e5-992b-7c9f99997384"). InnerVolumeSpecName "kube-api-access-fsttl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.651201 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq" (OuterVolumeSpecName: "kube-api-access-dnqcq") pod "e917fbc6-7142-47bd-bd50-43d15fc8376c" (UID: "e917fbc6-7142-47bd-bd50-43d15fc8376c"). InnerVolumeSpecName "kube-api-access-dnqcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.658905 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.665306 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5185075e-f5f6-4244-a81d-6f0943eabaf3" (UID: "5185075e-f5f6-4244-a81d-6f0943eabaf3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.711428 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a62c31d-0b47-44e5-992b-7c9f99997384" (UID: "4a62c31d-0b47-44e5-992b-7c9f99997384"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.737316 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e917fbc6-7142-47bd-bd50-43d15fc8376c" (UID: "e917fbc6-7142-47bd-bd50-43d15fc8376c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746803 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746843 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsttl\" (UniqueName: \"kubernetes.io/projected/4a62c31d-0b47-44e5-992b-7c9f99997384-kube-api-access-fsttl\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746881 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746897 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kft9\" (UniqueName: \"kubernetes.io/projected/be40637a-a178-4125-958b-c1bc36dee57d-kube-api-access-2kft9\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746912 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746926 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.746951 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5185075e-f5f6-4244-a81d-6f0943eabaf3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.747017 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/be40637a-a178-4125-958b-c1bc36dee57d-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.747050 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e917fbc6-7142-47bd-bd50-43d15fc8376c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.747064 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmmf4\" (UniqueName: \"kubernetes.io/projected/5185075e-f5f6-4244-a81d-6f0943eabaf3-kube-api-access-tmmf4\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.747076 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a62c31d-0b47-44e5-992b-7c9f99997384-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.747088 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnqcq\" (UniqueName: \"kubernetes.io/projected/e917fbc6-7142-47bd-bd50-43d15fc8376c-kube-api-access-dnqcq\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.810612 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 06:58:35 crc 
kubenswrapper[5055]: I1011 06:58:35.848217 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nx9l\" (UniqueName: \"kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l\") pod \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.848637 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities\") pod \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.848699 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content\") pod \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\" (UID: \"8d42319b-9bfe-4681-8e7f-dd085d3aef16\") " Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.849574 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities" (OuterVolumeSpecName: "utilities") pod "8d42319b-9bfe-4681-8e7f-dd085d3aef16" (UID: "8d42319b-9bfe-4681-8e7f-dd085d3aef16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.855805 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l" (OuterVolumeSpecName: "kube-api-access-5nx9l") pod "8d42319b-9bfe-4681-8e7f-dd085d3aef16" (UID: "8d42319b-9bfe-4681-8e7f-dd085d3aef16"). InnerVolumeSpecName "kube-api-access-5nx9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.947043 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d42319b-9bfe-4681-8e7f-dd085d3aef16" (UID: "8d42319b-9bfe-4681-8e7f-dd085d3aef16"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.951303 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nx9l\" (UniqueName: \"kubernetes.io/projected/8d42319b-9bfe-4681-8e7f-dd085d3aef16-kube-api-access-5nx9l\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.951346 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:35 crc kubenswrapper[5055]: I1011 06:58:35.951357 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d42319b-9bfe-4681-8e7f-dd085d3aef16-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.174016 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" event={"ID":"1821ec16-ef47-498d-a4cf-f72b5afcc7b0","Type":"ContainerStarted","Data":"a387875d6d84f6bee6a3f27d2e82d22a879757e89df7b12b003953fdfa9251e2"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.175278 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.175381 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" event={"ID":"1821ec16-ef47-498d-a4cf-f72b5afcc7b0","Type":"ContainerStarted","Data":"3c5dcad0b53816b9d89a626764b048e30f6256c9fd146b6643696126113e6e51"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.176463 5055 generic.go:334] "Generic (PLEG): container finished" podID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerID="3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923" exitCode=0 Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.176555 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p9vsc" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.178371 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerDied","Data":"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.178434 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p9vsc" event={"ID":"4a62c31d-0b47-44e5-992b-7c9f99997384","Type":"ContainerDied","Data":"d78e3836cb9e0c48faf33db2d298ceb096a42344291be5d5585a19c662adf433"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.178460 5055 scope.go:117] "RemoveContainer" containerID="3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.178790 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.180632 5055 generic.go:334] "Generic (PLEG): container finished" podID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerID="37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e" exitCode=0 Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.180686 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerDied","Data":"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.180708 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8njcl" event={"ID":"e917fbc6-7142-47bd-bd50-43d15fc8376c","Type":"ContainerDied","Data":"30b80c68fdba54ccd42b528a1493282265cfbd4a6f9756d6a63c2fbececa5ee3"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.180813 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8njcl" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.183122 5055 generic.go:334] "Generic (PLEG): container finished" podID="be40637a-a178-4125-958b-c1bc36dee57d" containerID="b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5" exitCode=0 Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.183304 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" event={"ID":"be40637a-a178-4125-958b-c1bc36dee57d","Type":"ContainerDied","Data":"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.183439 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" event={"ID":"be40637a-a178-4125-958b-c1bc36dee57d","Type":"ContainerDied","Data":"bcb2434dc072ba2114ac568561996993dce1c7a597b3266f2ccac5c4df2b49e9"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.183588 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-fflht" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.193488 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" podStartSLOduration=1.19347216 podStartE2EDuration="1.19347216s" podCreationTimestamp="2025-10-11 06:58:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 06:58:36.191586213 +0000 UTC m=+299.965860020" watchObservedRunningTime="2025-10-11 06:58:36.19347216 +0000 UTC m=+299.967745967" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.197528 5055 scope.go:117] "RemoveContainer" containerID="66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.198385 5055 generic.go:334] "Generic (PLEG): container finished" podID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerID="d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040" exitCode=0 Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.198465 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rsr9" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.198470 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerDied","Data":"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.198635 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rsr9" event={"ID":"5185075e-f5f6-4244-a81d-6f0943eabaf3","Type":"ContainerDied","Data":"a1335831eea0e4827e83c98f2171472e1c3a9be30abd9d237ea9f5e0d552a22d"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.200607 5055 generic.go:334] "Generic (PLEG): container finished" podID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerID="70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a" exitCode=0 Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.200648 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerDied","Data":"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.200677 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5j92b" event={"ID":"8d42319b-9bfe-4681-8e7f-dd085d3aef16","Type":"ContainerDied","Data":"04021612f7af9976b8f93c48f6b108bba6c56c87d42e0f21e46f8f43709816a7"} Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.200751 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5j92b" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.231096 5055 scope.go:117] "RemoveContainer" containerID="5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.254008 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.277705 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p9vsc"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.280365 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.283265 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-fflht"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.283572 5055 scope.go:117] "RemoveContainer" containerID="3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.284293 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923\": container with ID starting with 3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923 not found: ID does not exist" containerID="3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.284330 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923"} err="failed to get container status \"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923\": rpc error: code = NotFound desc = could not find container \"3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923\": container with ID starting with 3471493adeadb25a4fc102dc0da9d141206d98487b6912f1ce9ce2a31f05f923 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.284356 5055 scope.go:117] "RemoveContainer" containerID="66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.285458 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da\": container with ID starting with 66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da not found: ID does not exist" containerID="66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.285484 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da"} err="failed to get container status \"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da\": rpc error: code = NotFound desc = could not find container \"66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da\": container with ID starting with 66f3e96750362e8650a28dcadf25ac5fbedbb0a38c352575562b4a4f248714da not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.285507 5055 scope.go:117] "RemoveContainer" 
containerID="5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.285887 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0\": container with ID starting with 5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0 not found: ID does not exist" containerID="5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.285907 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0"} err="failed to get container status \"5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0\": rpc error: code = NotFound desc = could not find container \"5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0\": container with ID starting with 5e1f335f6a216c73faff831500bbe67c05869ee918fe00b2f70ea3077157b7e0 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.285920 5055 scope.go:117] "RemoveContainer" containerID="37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.285971 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8njcl"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.289337 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8njcl"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.303629 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.306012 5055 scope.go:117] "RemoveContainer" containerID="c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.308741 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5j92b"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.311124 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.317237 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rsr9"] Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.346002 5055 scope.go:117] "RemoveContainer" containerID="69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.363205 5055 scope.go:117] "RemoveContainer" containerID="37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.363695 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e\": container with ID starting with 37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e not found: ID does not exist" containerID="37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.363757 5055 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e"} err="failed to get container status \"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e\": rpc error: code = NotFound desc = could not find container \"37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e\": container with ID starting with 37c1c3311da896f56e0635921d74a136e2e05530da91f8a7cc2cbfca0b2f960e not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.363932 5055 scope.go:117] "RemoveContainer" containerID="c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.364331 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80\": container with ID starting with c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80 not found: ID does not exist" containerID="c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.364358 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80"} err="failed to get container status \"c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80\": rpc error: code = NotFound desc = could not find container \"c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80\": container with ID starting with c69532fbe2b9c799b640de676a4f52663901c34059f03b0a3bc728ae71e68c80 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.364413 5055 scope.go:117] "RemoveContainer" containerID="69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.364729 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6\": container with ID starting with 69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6 not found: ID does not exist" containerID="69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.364787 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6"} err="failed to get container status \"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6\": rpc error: code = NotFound desc = could not find container \"69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6\": container with ID starting with 69d915da700647fa623a0e4d89b62866ab489b6a098788340a07e992502022d6 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.364820 5055 scope.go:117] "RemoveContainer" containerID="b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.376709 5055 scope.go:117] "RemoveContainer" containerID="b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.377213 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5\": 
container with ID starting with b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5 not found: ID does not exist" containerID="b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.377242 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5"} err="failed to get container status \"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5\": rpc error: code = NotFound desc = could not find container \"b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5\": container with ID starting with b1b5ef3530935f892acb9a5acf67edba1e967c5eb3758998822fd4094f9445b5 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.377271 5055 scope.go:117] "RemoveContainer" containerID="d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.387738 5055 scope.go:117] "RemoveContainer" containerID="6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.401830 5055 scope.go:117] "RemoveContainer" containerID="43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.423503 5055 scope.go:117] "RemoveContainer" containerID="d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.423962 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040\": container with ID starting with d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040 not found: ID does not exist" containerID="d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424037 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040"} err="failed to get container status \"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040\": rpc error: code = NotFound desc = could not find container \"d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040\": container with ID starting with d63e6a0e1b2c53a369157a82f99845ea96f6358b601ae93fea5e3b919375b040 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424066 5055 scope.go:117] "RemoveContainer" containerID="6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.424358 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d\": container with ID starting with 6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d not found: ID does not exist" containerID="6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424388 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d"} err="failed to get container status \"6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d\": 
rpc error: code = NotFound desc = could not find container \"6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d\": container with ID starting with 6600a46432ee4c0b9b50d34c69265de705f504835767b0e159cf33f6bccf8b4d not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424408 5055 scope.go:117] "RemoveContainer" containerID="43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.424712 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320\": container with ID starting with 43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320 not found: ID does not exist" containerID="43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424760 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320"} err="failed to get container status \"43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320\": rpc error: code = NotFound desc = could not find container \"43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320\": container with ID starting with 43751a38fa97735c7d95fe0af657816af7f8be213b28baa5c50b08e69417e320 not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.424821 5055 scope.go:117] "RemoveContainer" containerID="70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.439603 5055 scope.go:117] "RemoveContainer" containerID="d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.451841 5055 scope.go:117] "RemoveContainer" containerID="eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.462802 5055 scope.go:117] "RemoveContainer" containerID="70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.463233 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a\": container with ID starting with 70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a not found: ID does not exist" containerID="70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.463276 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a"} err="failed to get container status \"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a\": rpc error: code = NotFound desc = could not find container \"70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a\": container with ID starting with 70833a01fcaf8df77b2e82b0d79a1320a7bb797e6e8266f53445b778214d3d4a not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.463302 5055 scope.go:117] "RemoveContainer" containerID="d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.463622 5055 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf\": container with ID starting with d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf not found: ID does not exist" containerID="d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.463673 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf"} err="failed to get container status \"d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf\": rpc error: code = NotFound desc = could not find container \"d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf\": container with ID starting with d11b20da88526a66f16ee2b49ccd0fec0a37674cf15ca4740ba4f24cfbc29dbf not found: ID does not exist" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.463712 5055 scope.go:117] "RemoveContainer" containerID="eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c" Oct 11 06:58:36 crc kubenswrapper[5055]: E1011 06:58:36.464121 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c\": container with ID starting with eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c not found: ID does not exist" containerID="eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c" Oct 11 06:58:36 crc kubenswrapper[5055]: I1011 06:58:36.464153 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c"} err="failed to get container status \"eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c\": rpc error: code = NotFound desc = could not find container \"eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c\": container with ID starting with eb2cc3bb2fcb3479723c497c68da36018ca9ac96251904938f1be7b40f5b631c not found: ID does not exist" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:36.999660 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" path="/var/lib/kubelet/pods/4a62c31d-0b47-44e5-992b-7c9f99997384/volumes" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.000698 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" path="/var/lib/kubelet/pods/5185075e-f5f6-4244-a81d-6f0943eabaf3/volumes" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.001419 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" path="/var/lib/kubelet/pods/8d42319b-9bfe-4681-8e7f-dd085d3aef16/volumes" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.002631 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be40637a-a178-4125-958b-c1bc36dee57d" path="/var/lib/kubelet/pods/be40637a-a178-4125-958b-c1bc36dee57d/volumes" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.003187 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" path="/var/lib/kubelet/pods/e917fbc6-7142-47bd-bd50-43d15fc8376c/volumes" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352115 5055 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352609 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352634 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352691 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352705 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352719 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352778 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352800 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352814 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352870 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352882 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352899 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.352974 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.352997 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353009 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="extract-content" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353057 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353074 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353091 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353104 5055 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353155 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353169 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353188 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353904 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353929 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.353942 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: E1011 06:58:37.353997 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354011 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="extract-utilities" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354269 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e917fbc6-7142-47bd-bd50-43d15fc8376c" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354299 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="be40637a-a178-4125-958b-c1bc36dee57d" containerName="marketplace-operator" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354317 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a62c31d-0b47-44e5-992b-7c9f99997384" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354370 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5185075e-f5f6-4244-a81d-6f0943eabaf3" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.354388 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d42319b-9bfe-4681-8e7f-dd085d3aef16" containerName="registry-server" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.357906 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.360242 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.360482 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.364879 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.364981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.365026 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.465882 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.465948 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.465980 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.466336 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.466467 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content\") pod \"redhat-marketplace-gbjgh\" (UID: 
\"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.492318 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb\") pod \"redhat-marketplace-gbjgh\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.551908 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.552885 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.554713 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.564712 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.567181 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.567260 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6z7h\" (UniqueName: \"kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.567311 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.667859 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6z7h\" (UniqueName: \"kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.667917 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.667980 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content\") pod 
\"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.668787 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.668843 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.686528 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6z7h\" (UniqueName: \"kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h\") pod \"certified-operators-8n7bz\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.722326 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.873830 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:37 crc kubenswrapper[5055]: I1011 06:58:37.891255 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 06:58:37 crc kubenswrapper[5055]: W1011 06:58:37.897844 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76714795_e16a_4a12_adca_70e428270dd0.slice/crio-7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48 WatchSource:0}: Error finding container 7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48: Status 404 returned error can't find the container with id 7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48 Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.044947 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 06:58:38 crc kubenswrapper[5055]: W1011 06:58:38.099539 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e7a03ef_3f5d_43be_9dd4_d375c36898e9.slice/crio-e98ca9a0c7a18847491cd4f0edc70d0fc4f2481e6e46b1db34ef6d0227a162da WatchSource:0}: Error finding container e98ca9a0c7a18847491cd4f0edc70d0fc4f2481e6e46b1db34ef6d0227a162da: Status 404 returned error can't find the container with id e98ca9a0c7a18847491cd4f0edc70d0fc4f2481e6e46b1db34ef6d0227a162da Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.209979 5055 generic.go:334] "Generic (PLEG): container finished" podID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerID="bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c" exitCode=0 Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.210028 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" 
event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerDied","Data":"bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c"} Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.210092 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerStarted","Data":"e98ca9a0c7a18847491cd4f0edc70d0fc4f2481e6e46b1db34ef6d0227a162da"} Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.211933 5055 generic.go:334] "Generic (PLEG): container finished" podID="76714795-e16a-4a12-adca-70e428270dd0" containerID="48f0c97ecd25437f6451d97cc35c2b3b3754bce21c67f204305dc89dc36a3c07" exitCode=0 Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.212070 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerDied","Data":"48f0c97ecd25437f6451d97cc35c2b3b3754bce21c67f204305dc89dc36a3c07"} Oct 11 06:58:38 crc kubenswrapper[5055]: I1011 06:58:38.212144 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerStarted","Data":"7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48"} Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.218985 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerStarted","Data":"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"} Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.221805 5055 generic.go:334] "Generic (PLEG): container finished" podID="76714795-e16a-4a12-adca-70e428270dd0" containerID="78ef318c54e4e91ead301df5c98700d1f92387130cd5a72ec59642eec852d3f3" exitCode=0 Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.221847 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerDied","Data":"78ef318c54e4e91ead301df5c98700d1f92387130cd5a72ec59642eec852d3f3"} Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.750103 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.752249 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.756425 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.788185 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.901049 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.901106 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.901169 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8nbk\" (UniqueName: \"kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.952348 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.953461 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.955907 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 11 06:58:39 crc kubenswrapper[5055]: I1011 06:58:39.966238 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002521 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002587 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdsfs\" (UniqueName: \"kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002629 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002650 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8nbk\" (UniqueName: \"kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002797 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.002841 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.003009 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.003073 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities\") pod \"community-operators-bqxlg\" (UID: 
\"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.027117 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8nbk\" (UniqueName: \"kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk\") pod \"community-operators-bqxlg\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.105043 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdsfs\" (UniqueName: \"kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.105547 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.105642 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.106200 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.106536 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.122481 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdsfs\" (UniqueName: \"kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs\") pod \"redhat-operators-rm5wf\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.136311 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.226459 5055 generic.go:334] "Generic (PLEG): container finished" podID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerID="f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44" exitCode=0 Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.226530 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerDied","Data":"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"} Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.230053 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerStarted","Data":"3c02583cfa1ad23f58a6e7cc23c4ddb2dd55a0c305c5c9539076186e7911e8a0"} Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.270194 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.274073 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gbjgh" podStartSLOduration=1.7185727050000001 podStartE2EDuration="3.274043742s" podCreationTimestamp="2025-10-11 06:58:37 +0000 UTC" firstStartedPulling="2025-10-11 06:58:38.212940366 +0000 UTC m=+301.987214163" lastFinishedPulling="2025-10-11 06:58:39.768411393 +0000 UTC m=+303.542685200" observedRunningTime="2025-10-11 06:58:40.26802654 +0000 UTC m=+304.042300347" watchObservedRunningTime="2025-10-11 06:58:40.274043742 +0000 UTC m=+304.048317549" Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.468303 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 06:58:40 crc kubenswrapper[5055]: I1011 06:58:40.662702 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.237680 5055 generic.go:334] "Generic (PLEG): container finished" podID="05199d34-a391-4e72-b9b3-7864f988c137" containerID="ae71dd4ef5e4096a53b0e4c8765127c64b27e649e354a8433d403aed89a16372" exitCode=0 Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.238021 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerDied","Data":"ae71dd4ef5e4096a53b0e4c8765127c64b27e649e354a8433d403aed89a16372"} Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.238050 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerStarted","Data":"3a18833b632dd2568e2a30c9f2eb2dbbd7ee899f0975b74da41c77522f294dd9"} Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.240824 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerID="e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc" exitCode=0 Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.240868 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" 
event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerDied","Data":"e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc"} Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.240884 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerStarted","Data":"2aff22011331f5a8876481c7a72f6b9c4ea37993d9a26907abcacea7bf94b40f"} Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.244851 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerStarted","Data":"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"} Oct 11 06:58:41 crc kubenswrapper[5055]: I1011 06:58:41.288366 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8n7bz" podStartSLOduration=1.598672613 podStartE2EDuration="4.288339054s" podCreationTimestamp="2025-10-11 06:58:37 +0000 UTC" firstStartedPulling="2025-10-11 06:58:38.2111128 +0000 UTC m=+301.985386607" lastFinishedPulling="2025-10-11 06:58:40.900779241 +0000 UTC m=+304.675053048" observedRunningTime="2025-10-11 06:58:41.283530728 +0000 UTC m=+305.057804545" watchObservedRunningTime="2025-10-11 06:58:41.288339054 +0000 UTC m=+305.062612861" Oct 11 06:58:42 crc kubenswrapper[5055]: I1011 06:58:42.250568 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerStarted","Data":"c5071fbb1ae6940a3a0444eb28d2fc65eb1cfc967e2d7770a101ceb19030b6ea"} Oct 11 06:58:42 crc kubenswrapper[5055]: I1011 06:58:42.252559 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerID="abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f" exitCode=0 Oct 11 06:58:42 crc kubenswrapper[5055]: I1011 06:58:42.253347 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerDied","Data":"abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f"} Oct 11 06:58:43 crc kubenswrapper[5055]: I1011 06:58:43.259515 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerStarted","Data":"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672"} Oct 11 06:58:43 crc kubenswrapper[5055]: I1011 06:58:43.261301 5055 generic.go:334] "Generic (PLEG): container finished" podID="05199d34-a391-4e72-b9b3-7864f988c137" containerID="c5071fbb1ae6940a3a0444eb28d2fc65eb1cfc967e2d7770a101ceb19030b6ea" exitCode=0 Oct 11 06:58:43 crc kubenswrapper[5055]: I1011 06:58:43.261342 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerDied","Data":"c5071fbb1ae6940a3a0444eb28d2fc65eb1cfc967e2d7770a101ceb19030b6ea"} Oct 11 06:58:43 crc kubenswrapper[5055]: I1011 06:58:43.273873 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bqxlg" podStartSLOduration=2.79588422 podStartE2EDuration="4.273845359s" podCreationTimestamp="2025-10-11 06:58:39 +0000 UTC" 
firstStartedPulling="2025-10-11 06:58:41.242239827 +0000 UTC m=+305.016513624" lastFinishedPulling="2025-10-11 06:58:42.720200956 +0000 UTC m=+306.494474763" observedRunningTime="2025-10-11 06:58:43.272968523 +0000 UTC m=+307.047242330" watchObservedRunningTime="2025-10-11 06:58:43.273845359 +0000 UTC m=+307.048119156" Oct 11 06:58:44 crc kubenswrapper[5055]: I1011 06:58:44.285793 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerStarted","Data":"a2c471ac93fa17c491ea757c3212a201fb7e06b5d36c6ce1bf0ebb57518f111d"} Oct 11 06:58:44 crc kubenswrapper[5055]: I1011 06:58:44.301874 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rm5wf" podStartSLOduration=2.876100792 podStartE2EDuration="5.301855606s" podCreationTimestamp="2025-10-11 06:58:39 +0000 UTC" firstStartedPulling="2025-10-11 06:58:41.23969072 +0000 UTC m=+305.013964527" lastFinishedPulling="2025-10-11 06:58:43.665445534 +0000 UTC m=+307.439719341" observedRunningTime="2025-10-11 06:58:44.300264158 +0000 UTC m=+308.074537965" watchObservedRunningTime="2025-10-11 06:58:44.301855606 +0000 UTC m=+308.076129423" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.723224 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.723738 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.769637 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.874481 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.874530 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:47 crc kubenswrapper[5055]: I1011 06:58:47.910394 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:48 crc kubenswrapper[5055]: I1011 06:58:48.352505 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 06:58:48 crc kubenswrapper[5055]: I1011 06:58:48.358934 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.137194 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.137261 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.179596 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.271570 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 
06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.271611 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.314749 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.351919 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 06:58:50 crc kubenswrapper[5055]: I1011 06:58:50.356750 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 06:59:32 crc kubenswrapper[5055]: I1011 06:59:32.422826 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 06:59:32 crc kubenswrapper[5055]: I1011 06:59:32.423373 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.136163 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2"] Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.137803 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.139988 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.140274 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.146682 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2"] Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.163494 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.163577 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdbbk\" (UniqueName: \"kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.163701 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.264442 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.264495 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdbbk\" (UniqueName: \"kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.264540 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.265414 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume\") pod 
\"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.270644 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.281054 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdbbk\" (UniqueName: \"kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk\") pod \"collect-profiles-29336100-5zjw2\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.478540 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.640974 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2"] Oct 11 07:00:00 crc kubenswrapper[5055]: W1011 07:00:00.652531 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd42f8ea2_1177_4851_9a44_452ef9b701b9.slice/crio-d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f WatchSource:0}: Error finding container d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f: Status 404 returned error can't find the container with id d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f Oct 11 07:00:00 crc kubenswrapper[5055]: I1011 07:00:00.690449 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" event={"ID":"d42f8ea2-1177-4851-9a44-452ef9b701b9","Type":"ContainerStarted","Data":"d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f"} Oct 11 07:00:01 crc kubenswrapper[5055]: I1011 07:00:01.696699 5055 generic.go:334] "Generic (PLEG): container finished" podID="d42f8ea2-1177-4851-9a44-452ef9b701b9" containerID="c261bc982101a5b94c8faab8960c5713e8b6646ffe1bcf3fde44a2fc00ff21ad" exitCode=0 Oct 11 07:00:01 crc kubenswrapper[5055]: I1011 07:00:01.696748 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" event={"ID":"d42f8ea2-1177-4851-9a44-452ef9b701b9","Type":"ContainerDied","Data":"c261bc982101a5b94c8faab8960c5713e8b6646ffe1bcf3fde44a2fc00ff21ad"} Oct 11 07:00:02 crc kubenswrapper[5055]: I1011 07:00:02.423122 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:00:02 crc kubenswrapper[5055]: I1011 07:00:02.423421 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:00:02 crc kubenswrapper[5055]: I1011 07:00:02.916725 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.003207 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume\") pod \"d42f8ea2-1177-4851-9a44-452ef9b701b9\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.003269 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume\") pod \"d42f8ea2-1177-4851-9a44-452ef9b701b9\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.003305 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdbbk\" (UniqueName: \"kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk\") pod \"d42f8ea2-1177-4851-9a44-452ef9b701b9\" (UID: \"d42f8ea2-1177-4851-9a44-452ef9b701b9\") " Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.004452 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume" (OuterVolumeSpecName: "config-volume") pod "d42f8ea2-1177-4851-9a44-452ef9b701b9" (UID: "d42f8ea2-1177-4851-9a44-452ef9b701b9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.008950 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk" (OuterVolumeSpecName: "kube-api-access-rdbbk") pod "d42f8ea2-1177-4851-9a44-452ef9b701b9" (UID: "d42f8ea2-1177-4851-9a44-452ef9b701b9"). InnerVolumeSpecName "kube-api-access-rdbbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.009178 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d42f8ea2-1177-4851-9a44-452ef9b701b9" (UID: "d42f8ea2-1177-4851-9a44-452ef9b701b9"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.104694 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d42f8ea2-1177-4851-9a44-452ef9b701b9-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.104728 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdbbk\" (UniqueName: \"kubernetes.io/projected/d42f8ea2-1177-4851-9a44-452ef9b701b9-kube-api-access-rdbbk\") on node \"crc\" DevicePath \"\"" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.104741 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d42f8ea2-1177-4851-9a44-452ef9b701b9-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.718692 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" event={"ID":"d42f8ea2-1177-4851-9a44-452ef9b701b9","Type":"ContainerDied","Data":"d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f"} Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.718737 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d38d9eec7bb25b8059501c76d2815bfe7abc2416200af9b9ea45a7999edd579f" Oct 11 07:00:03 crc kubenswrapper[5055]: I1011 07:00:03.718803 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2" Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.422044 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.422597 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.422647 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.423325 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.423383 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2" gracePeriod=600 Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.870173 5055 generic.go:334] "Generic (PLEG): container finished" 
podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2" exitCode=0 Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.870254 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2"} Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.870535 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba"} Oct 11 07:00:32 crc kubenswrapper[5055]: I1011 07:00:32.870558 5055 scope.go:117] "RemoveContainer" containerID="de5b4223fef0fd0703b3c25f185182bd5e1422bbd2bc259d8fdb951a1e05d33e" Oct 11 07:01:25 crc kubenswrapper[5055]: I1011 07:01:25.998843 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hlzkn"] Oct 11 07:01:26 crc kubenswrapper[5055]: E1011 07:01:25.999621 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d42f8ea2-1177-4851-9a44-452ef9b701b9" containerName="collect-profiles" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:25.999636 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d42f8ea2-1177-4851-9a44-452ef9b701b9" containerName="collect-profiles" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:25.999732 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d42f8ea2-1177-4851-9a44-452ef9b701b9" containerName="collect-profiles" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.000161 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.016215 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hlzkn"] Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.114542 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.114629 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-registry-tls\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.114712 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-registry-certificates\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.114787 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/859d552e-d692-4bc9-a4ae-7539674ea119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.114965 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-bound-sa-token\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.115026 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/859d552e-d692-4bc9-a4ae-7539674ea119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.115825 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-trusted-ca\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.115937 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zww7t\" (UniqueName: 
\"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-kube-api-access-zww7t\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.140423 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.217871 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-registry-tls\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.217910 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-registry-certificates\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.217935 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/859d552e-d692-4bc9-a4ae-7539674ea119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.217957 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-bound-sa-token\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.217975 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/859d552e-d692-4bc9-a4ae-7539674ea119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.218005 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-trusted-ca\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.218028 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zww7t\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-kube-api-access-zww7t\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.218577 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/859d552e-d692-4bc9-a4ae-7539674ea119-ca-trust-extracted\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.219693 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-registry-certificates\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.219711 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/859d552e-d692-4bc9-a4ae-7539674ea119-trusted-ca\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.222850 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-registry-tls\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.223184 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/859d552e-d692-4bc9-a4ae-7539674ea119-installation-pull-secrets\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.234014 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zww7t\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-kube-api-access-zww7t\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.235300 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/859d552e-d692-4bc9-a4ae-7539674ea119-bound-sa-token\") pod \"image-registry-66df7c8f76-hlzkn\" (UID: \"859d552e-d692-4bc9-a4ae-7539674ea119\") " pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.318093 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:26 crc kubenswrapper[5055]: I1011 07:01:26.496613 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-hlzkn"] Oct 11 07:01:27 crc kubenswrapper[5055]: I1011 07:01:27.159832 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" event={"ID":"859d552e-d692-4bc9-a4ae-7539674ea119","Type":"ContainerStarted","Data":"c74a3ad76f2d348d5059a7457d1a0d042a636076c9cb33ebb437528d4fadd138"} Oct 11 07:01:27 crc kubenswrapper[5055]: I1011 07:01:27.160096 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" event={"ID":"859d552e-d692-4bc9-a4ae-7539674ea119","Type":"ContainerStarted","Data":"8bec2831cad0247fdfeb1c86e20b695a8f65e211a73768c18b105ed0e43caa05"} Oct 11 07:01:27 crc kubenswrapper[5055]: I1011 07:01:27.160859 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:27 crc kubenswrapper[5055]: I1011 07:01:27.176912 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" podStartSLOduration=2.17689221 podStartE2EDuration="2.17689221s" podCreationTimestamp="2025-10-11 07:01:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:01:27.174996687 +0000 UTC m=+470.949270494" watchObservedRunningTime="2025-10-11 07:01:27.17689221 +0000 UTC m=+470.951166017" Oct 11 07:01:46 crc kubenswrapper[5055]: I1011 07:01:46.323642 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-hlzkn" Oct 11 07:01:46 crc kubenswrapper[5055]: I1011 07:01:46.369269 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"] Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.421830 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerName="registry" containerID="cri-o://b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5" gracePeriod=30 Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.446484 5055 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-svqd7 container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.13:5000/healthz\": dial tcp 10.217.0.13:5000: connect: connection refused" start-of-body= Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.446542 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.13:5000/healthz\": dial tcp 10.217.0.13:5000: connect: connection refused" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.733098 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.906572 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.906846 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907009 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907229 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907396 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45f55\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907508 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907631 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907742 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token\") pod \"7c8fb95a-98dc-4592-afe6-195dc531d8df\" (UID: \"7c8fb95a-98dc-4592-afe6-195dc531d8df\") " Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.907425 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.908193 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.908377 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.916360 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.916417 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.916875 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.917137 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55" (OuterVolumeSpecName: "kube-api-access-45f55") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "kube-api-access-45f55". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.925735 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:02:11 crc kubenswrapper[5055]: I1011 07:02:11.961634 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "7c8fb95a-98dc-4592-afe6-195dc531d8df" (UID: "7c8fb95a-98dc-4592-afe6-195dc531d8df"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009589 5055 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c8fb95a-98dc-4592-afe6-195dc531d8df-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009619 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45f55\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-kube-api-access-45f55\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009627 5055 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009637 5055 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009646 5055 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c8fb95a-98dc-4592-afe6-195dc531d8df-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.009653 5055 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c8fb95a-98dc-4592-afe6-195dc531d8df-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.402982 5055 generic.go:334] "Generic (PLEG): container finished" podID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerID="b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5" exitCode=0 Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.403029 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" event={"ID":"7c8fb95a-98dc-4592-afe6-195dc531d8df","Type":"ContainerDied","Data":"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5"} Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.403008 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.403068 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-svqd7" event={"ID":"7c8fb95a-98dc-4592-afe6-195dc531d8df","Type":"ContainerDied","Data":"f5c23caebbbe90fcaf46dea06582096e4d0738d7e0d460629ced1ef3e18bd67a"} Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.403088 5055 scope.go:117] "RemoveContainer" containerID="b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.421959 5055 scope.go:117] "RemoveContainer" containerID="b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5" Oct 11 07:02:12 crc kubenswrapper[5055]: E1011 07:02:12.422337 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5\": container with ID starting with b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5 not found: ID does not exist" containerID="b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.422396 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5"} err="failed to get container status \"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5\": rpc error: code = NotFound desc = could not find container \"b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5\": container with ID starting with b3267381609136c22c430a81cf0f5fb1e1d76b532b3279de1b1eaada5edde7d5 not found: ID does not exist" Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.439018 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"] Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.445029 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-svqd7"] Oct 11 07:02:12 crc kubenswrapper[5055]: I1011 07:02:12.999225 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" path="/var/lib/kubelet/pods/7c8fb95a-98dc-4592-afe6-195dc531d8df/volumes" Oct 11 07:02:32 crc kubenswrapper[5055]: I1011 07:02:32.422122 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:02:32 crc kubenswrapper[5055]: I1011 07:02:32.422730 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:03:02 crc kubenswrapper[5055]: I1011 07:03:02.421826 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 
11 07:03:02 crc kubenswrapper[5055]: I1011 07:03:02.422497 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.422459 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.423353 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.423449 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.424417 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.424518 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba" gracePeriod=600 Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.857454 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba" exitCode=0 Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.857551 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba"} Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.857925 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0"} Oct 11 07:03:32 crc kubenswrapper[5055]: I1011 07:03:32.857952 5055 scope.go:117] "RemoveContainer" containerID="0bef0585b62d67d040f6467382e7b6baaa0abbf7bfa19e2efffdf735fa6706f2" Oct 11 07:05:32 crc kubenswrapper[5055]: I1011 07:05:32.422638 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:05:32 crc kubenswrapper[5055]: I1011 07:05:32.423457 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:06:02 crc kubenswrapper[5055]: I1011 07:06:02.422646 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:06:02 crc kubenswrapper[5055]: I1011 07:06:02.423256 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.254094 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"] Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.254616 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" containerID="cri-o://6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416" gracePeriod=30 Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.348953 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"] Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.349140 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" containerID="cri-o://1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713" gracePeriod=30 Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.604564 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.640586 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.670059 5055 generic.go:334] "Generic (PLEG): container finished" podID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerID="1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713" exitCode=0 Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.670130 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" event={"ID":"0f5d0c4c-155d-4fdc-851c-779e57302a8c","Type":"ContainerDied","Data":"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713"} Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.670157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" event={"ID":"0f5d0c4c-155d-4fdc-851c-779e57302a8c","Type":"ContainerDied","Data":"5f6deffc7d43bd545c53fffa328bae0a3686e11e81f66b0e722694a569cfd2ae"} Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.670171 5055 scope.go:117] "RemoveContainer" containerID="1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.670268 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.672253 5055 generic.go:334] "Generic (PLEG): container finished" podID="c7f95b49-339c-401f-975f-e356a2077b01" containerID="6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416" exitCode=0 Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.672284 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" event={"ID":"c7f95b49-339c-401f-975f-e356a2077b01","Type":"ContainerDied","Data":"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416"} Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.672298 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" event={"ID":"c7f95b49-339c-401f-975f-e356a2077b01","Type":"ContainerDied","Data":"b476c5273f0eb8d5e408d102517301b7fbc7c272ebd0b2121f717ff54d86cb52"} Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.672339 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-jsqdp" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.683911 5055 scope.go:117] "RemoveContainer" containerID="1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713" Oct 11 07:06:05 crc kubenswrapper[5055]: E1011 07:06:05.684374 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713\": container with ID starting with 1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713 not found: ID does not exist" containerID="1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.684402 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713"} err="failed to get container status \"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713\": rpc error: code = NotFound desc = could not find container \"1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713\": container with ID starting with 1891e00814cfe184e8128e8367293916a86dc16ed80b47ebfd2f65dc24988713 not found: ID does not exist" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.684423 5055 scope.go:117] "RemoveContainer" containerID="6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.696084 5055 scope.go:117] "RemoveContainer" containerID="6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416" Oct 11 07:06:05 crc kubenswrapper[5055]: E1011 07:06:05.696493 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416\": container with ID starting with 6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416 not found: ID does not exist" containerID="6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.696533 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416"} err="failed to get container status \"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416\": rpc error: code = NotFound desc = could not find container \"6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416\": container with ID starting with 6b73530303808f4d48ac48b5c9b3b43579dc4daee5f2bcc267f6b1a95294c416 not found: ID does not exist" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748129 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca\") pod \"c7f95b49-339c-401f-975f-e356a2077b01\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748176 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config\") pod \"c7f95b49-339c-401f-975f-e356a2077b01\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748243 5055 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca\") pod \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748282 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert\") pod \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748333 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6g7l\" (UniqueName: \"kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l\") pod \"c7f95b49-339c-401f-975f-e356a2077b01\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748354 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sncg\" (UniqueName: \"kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg\") pod \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748381 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles\") pod \"c7f95b49-339c-401f-975f-e356a2077b01\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748401 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config\") pod \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\" (UID: \"0f5d0c4c-155d-4fdc-851c-779e57302a8c\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.748420 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert\") pod \"c7f95b49-339c-401f-975f-e356a2077b01\" (UID: \"c7f95b49-339c-401f-975f-e356a2077b01\") " Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.749134 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca" (OuterVolumeSpecName: "client-ca") pod "0f5d0c4c-155d-4fdc-851c-779e57302a8c" (UID: "0f5d0c4c-155d-4fdc-851c-779e57302a8c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.749184 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config" (OuterVolumeSpecName: "config") pod "0f5d0c4c-155d-4fdc-851c-779e57302a8c" (UID: "0f5d0c4c-155d-4fdc-851c-779e57302a8c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.749264 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c7f95b49-339c-401f-975f-e356a2077b01" (UID: "c7f95b49-339c-401f-975f-e356a2077b01"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.749313 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config" (OuterVolumeSpecName: "config") pod "c7f95b49-339c-401f-975f-e356a2077b01" (UID: "c7f95b49-339c-401f-975f-e356a2077b01"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.750237 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca" (OuterVolumeSpecName: "client-ca") pod "c7f95b49-339c-401f-975f-e356a2077b01" (UID: "c7f95b49-339c-401f-975f-e356a2077b01"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.753701 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c7f95b49-339c-401f-975f-e356a2077b01" (UID: "c7f95b49-339c-401f-975f-e356a2077b01"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.753735 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0f5d0c4c-155d-4fdc-851c-779e57302a8c" (UID: "0f5d0c4c-155d-4fdc-851c-779e57302a8c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.753745 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l" (OuterVolumeSpecName: "kube-api-access-h6g7l") pod "c7f95b49-339c-401f-975f-e356a2077b01" (UID: "c7f95b49-339c-401f-975f-e356a2077b01"). InnerVolumeSpecName "kube-api-access-h6g7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.753771 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg" (OuterVolumeSpecName: "kube-api-access-8sncg") pod "0f5d0c4c-155d-4fdc-851c-779e57302a8c" (UID: "0f5d0c4c-155d-4fdc-851c-779e57302a8c"). InnerVolumeSpecName "kube-api-access-8sncg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849873 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0f5d0c4c-155d-4fdc-851c-779e57302a8c-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849916 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6g7l\" (UniqueName: \"kubernetes.io/projected/c7f95b49-339c-401f-975f-e356a2077b01-kube-api-access-h6g7l\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849931 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sncg\" (UniqueName: \"kubernetes.io/projected/0f5d0c4c-155d-4fdc-851c-779e57302a8c-kube-api-access-8sncg\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849943 5055 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849957 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849968 5055 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f95b49-339c-401f-975f-e356a2077b01-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.849979 5055 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.850025 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f95b49-339c-401f-975f-e356a2077b01-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:05 crc kubenswrapper[5055]: I1011 07:06:05.850042 5055 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0f5d0c4c-155d-4fdc-851c-779e57302a8c-client-ca\") on node \"crc\" DevicePath \"\"" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.005561 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.009399 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-jsqdp"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.016373 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.018988 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx6cb"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.755812 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd"] Oct 11 07:06:06 crc kubenswrapper[5055]: E1011 07:06:06.756127 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerName="registry" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756150 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerName="registry" Oct 11 07:06:06 crc kubenswrapper[5055]: E1011 07:06:06.756184 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756195 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: E1011 07:06:06.756213 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756224 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756367 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8fb95a-98dc-4592-afe6-195dc531d8df" containerName="registry" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756390 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7f95b49-339c-401f-975f-e356a2077b01" containerName="controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756407 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" containerName="route-controller-manager" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.756915 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.763812 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5db7f64c8d-f47sw"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.764001 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.764252 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.764408 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.764557 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.765091 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.765238 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.765445 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.768163 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.768329 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.768537 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.768692 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.768849 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.770260 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.771280 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.775524 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.790424 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5db7f64c8d-f47sw"] Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.861391 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtshx\" (UniqueName: \"kubernetes.io/projected/37ac8414-b0c8-4413-a362-57d401c5849a-kube-api-access-qtshx\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.861481 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-client-ca\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.861518 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-config\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.861534 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/37ac8414-b0c8-4413-a362-57d401c5849a-serving-cert\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962713 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-config\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962836 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76kpv\" (UniqueName: \"kubernetes.io/projected/c3cef513-fcac-4f6f-ac57-91da0bfb4570-kube-api-access-76kpv\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962870 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-proxy-ca-bundles\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962906 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ac8414-b0c8-4413-a362-57d401c5849a-serving-cert\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962966 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-config\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.962998 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtshx\" (UniqueName: \"kubernetes.io/projected/37ac8414-b0c8-4413-a362-57d401c5849a-kube-api-access-qtshx\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.963044 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-client-ca\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.963104 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c3cef513-fcac-4f6f-ac57-91da0bfb4570-serving-cert\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.963170 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-client-ca\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.964292 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-client-ca\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.964508 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37ac8414-b0c8-4413-a362-57d401c5849a-config\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.968594 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37ac8414-b0c8-4413-a362-57d401c5849a-serving-cert\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:06 crc kubenswrapper[5055]: I1011 07:06:06.979121 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtshx\" (UniqueName: \"kubernetes.io/projected/37ac8414-b0c8-4413-a362-57d401c5849a-kube-api-access-qtshx\") pod \"route-controller-manager-57c648fcbc-ctfkd\" (UID: \"37ac8414-b0c8-4413-a362-57d401c5849a\") " pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.001282 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f5d0c4c-155d-4fdc-851c-779e57302a8c" path="/var/lib/kubelet/pods/0f5d0c4c-155d-4fdc-851c-779e57302a8c/volumes" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.001979 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7f95b49-339c-401f-975f-e356a2077b01" path="/var/lib/kubelet/pods/c7f95b49-339c-401f-975f-e356a2077b01/volumes" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.063755 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-config\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.063871 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-client-ca\") pod 
\"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.063928 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3cef513-fcac-4f6f-ac57-91da0bfb4570-serving-cert\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.064022 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-proxy-ca-bundles\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.064083 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76kpv\" (UniqueName: \"kubernetes.io/projected/c3cef513-fcac-4f6f-ac57-91da0bfb4570-kube-api-access-76kpv\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.065274 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-client-ca\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.065874 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-config\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.065939 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3cef513-fcac-4f6f-ac57-91da0bfb4570-proxy-ca-bundles\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.072385 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3cef513-fcac-4f6f-ac57-91da0bfb4570-serving-cert\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.081922 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.089966 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76kpv\" (UniqueName: \"kubernetes.io/projected/c3cef513-fcac-4f6f-ac57-91da0bfb4570-kube-api-access-76kpv\") pod \"controller-manager-5db7f64c8d-f47sw\" (UID: \"c3cef513-fcac-4f6f-ac57-91da0bfb4570\") " pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.390244 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.544097 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd"] Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.619211 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5db7f64c8d-f47sw"] Oct 11 07:06:07 crc kubenswrapper[5055]: W1011 07:06:07.624999 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3cef513_fcac_4f6f_ac57_91da0bfb4570.slice/crio-4709e6d9f97e17e53498797999c18fff7d939abf4625e883cd3d8ff9d41d13e9 WatchSource:0}: Error finding container 4709e6d9f97e17e53498797999c18fff7d939abf4625e883cd3d8ff9d41d13e9: Status 404 returned error can't find the container with id 4709e6d9f97e17e53498797999c18fff7d939abf4625e883cd3d8ff9d41d13e9 Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.691458 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" event={"ID":"c3cef513-fcac-4f6f-ac57-91da0bfb4570","Type":"ContainerStarted","Data":"4709e6d9f97e17e53498797999c18fff7d939abf4625e883cd3d8ff9d41d13e9"} Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.693785 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" event={"ID":"37ac8414-b0c8-4413-a362-57d401c5849a","Type":"ContainerStarted","Data":"41cc0813767c98ab82d4dca3cdecb52672c90978fdc5fc8ba81e85c013d9dbeb"} Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.693969 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" event={"ID":"37ac8414-b0c8-4413-a362-57d401c5849a","Type":"ContainerStarted","Data":"9a11a9ce06380799d0def451f91649f15f7c352ebc4d230bc57ae10c583e5c24"} Oct 11 07:06:07 crc kubenswrapper[5055]: I1011 07:06:07.711321 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" podStartSLOduration=2.711304005 podStartE2EDuration="2.711304005s" podCreationTimestamp="2025-10-11 07:06:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:06:07.709634608 +0000 UTC m=+751.483908425" watchObservedRunningTime="2025-10-11 07:06:07.711304005 +0000 UTC m=+751.485577812" Oct 11 07:06:08 crc kubenswrapper[5055]: I1011 07:06:08.698353 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" 
event={"ID":"c3cef513-fcac-4f6f-ac57-91da0bfb4570","Type":"ContainerStarted","Data":"ad2322ee8956064ba2a7f1d60204997cbe1a625d5ac8ed5c27305b501a4ba394"} Oct 11 07:06:08 crc kubenswrapper[5055]: I1011 07:06:08.698676 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:08 crc kubenswrapper[5055]: I1011 07:06:08.703048 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-57c648fcbc-ctfkd" Oct 11 07:06:08 crc kubenswrapper[5055]: I1011 07:06:08.714153 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" podStartSLOduration=3.714137864 podStartE2EDuration="3.714137864s" podCreationTimestamp="2025-10-11 07:06:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:06:08.710735147 +0000 UTC m=+752.485008954" watchObservedRunningTime="2025-10-11 07:06:08.714137864 +0000 UTC m=+752.488411661" Oct 11 07:06:09 crc kubenswrapper[5055]: I1011 07:06:09.703357 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:09 crc kubenswrapper[5055]: I1011 07:06:09.708250 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5db7f64c8d-f47sw" Oct 11 07:06:11 crc kubenswrapper[5055]: I1011 07:06:11.496426 5055 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.421746 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.422330 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.422370 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.422883 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.422932 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0" 
gracePeriod=600 Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.824104 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0" exitCode=0 Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.824182 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0"} Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.824430 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586"} Oct 11 07:06:32 crc kubenswrapper[5055]: I1011 07:06:32.824448 5055 scope.go:117] "RemoveContainer" containerID="4f5499446c25e9b2790ad7e20935a2824f019b893c7b11657b4e851362251fba" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.399382 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5dg24"] Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401179 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-controller" containerID="cri-o://8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401227 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="nbdb" containerID="cri-o://4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401367 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="northd" containerID="cri-o://3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401419 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401457 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-node" containerID="cri-o://cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401491 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-acl-logging" containerID="cri-o://2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" gracePeriod=30 Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 
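[Editor's note] The run above is the kubelet's probe-driven restart path: prober.go reports the failed Liveness probe, the sync loop marks the container unhealthy, kuberuntime_container.go kills it with the pod's termination grace period (600s here), and the next PLEG relist reports ContainerDied followed by ContainerStarted for the replacement. A minimal Python sketch for pulling these events out of a log like this one; the script name and the file argument are illustrative, not part of the log:

    # probe_restarts.py -- editor's sketch, not part of the captured log.
    # Assumes klog-style kubelet lines in the format shown above.
    import re
    import sys
    from collections import Counter

    FAIL = re.compile(r'"Probe failed" probeType="(\w+)" pod="([^"]+)"')
    KILL = re.compile(r'"Killing container with a grace period" '
                      r'pod="([^"]+)".*?containerName="([^"]+)".*?gracePeriod=(\d+)')

    failures = Counter()
    with open(sys.argv[1], errors="replace") as log:
        for line in log:
            if (m := FAIL.search(line)):
                failures[(m.group(2), m.group(1))] += 1  # keyed by (pod, probe type)
            if (m := KILL.search(line)):
                pod, name, grace = m.group(1), m.group(2), m.group(3)
                print(f"kill: {name} in {pod} (gracePeriod={grace}s)")

    for (pod, kind), n in failures.most_common():
        print(f"{n} {kind} probe failure(s): {pod}")

Against this excerpt it would report the single Liveness failure for machine-config-daemon-qtqvf and nine grace-period kills in total: one (600s) for machine-config-daemon, then eight (30s each) for the ovnkube-node-5dg24 containers being torn down after the API DELETE.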
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.401818 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="sbdb" containerID="cri-o://2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" gracePeriod=30
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.456500 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" containerID="cri-o://6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" gracePeriod=30
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.746804 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/3.log"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.750690 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovn-acl-logging/0.log"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.751169 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovn-controller/0.log"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.754152 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.802744 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-k5sls"]
Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.802949 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.802961 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller"
Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.802970 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="northd"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.802978 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="northd"
Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.802986 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-ovn-metrics"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.802992 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-ovn-metrics"
Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.802999 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="nbdb"
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803004 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="nbdb"
Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803014 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="sbdb"
Oct 11 07:07:06 crc kubenswrapper[5055]:
I1011 07:07:06.803020 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="sbdb" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803030 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-node" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803036 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-node" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803047 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803054 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803063 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803071 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803090 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803097 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803107 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kubecfg-setup" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803113 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kubecfg-setup" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803121 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-acl-logging" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803128 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-acl-logging" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803140 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803147 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803256 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803266 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-ovn-metrics" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803278 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="sbdb" 
Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803286 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803293 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="northd" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803302 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803310 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="kube-rbac-proxy-node" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803319 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803329 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803337 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="nbdb" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803346 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovn-acl-logging" Oct 11 07:07:06 crc kubenswrapper[5055]: E1011 07:07:06.803447 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803457 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.803548 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerName="ovnkube-controller" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.805446 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929432 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929517 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvpvs\" (UniqueName: \"kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929565 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929591 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929629 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929647 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929660 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929712 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929730 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929747 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929803 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929817 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929836 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929874 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929896 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929915 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929930 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929943 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929985 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929968 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929996 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930017 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930060 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930065 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930061 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930089 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930086 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash" (OuterVolumeSpecName: "host-slash") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930020 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930112 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket" (OuterVolumeSpecName: "log-socket") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930041 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929573 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930161 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn\") pod \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\" (UID: \"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad\") " Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930200 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930314 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930338 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.929964 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log" (OuterVolumeSpecName: "node-log") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930368 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930400 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-env-overrides\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930436 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-config\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930471 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-netns\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930489 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-etc-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930536 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-systemd-units\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930605 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-netd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930646 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovn-node-metrics-cert\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930686 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-node-log\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930739 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc8pl\" (UniqueName: \"kubernetes.io/projected/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-kube-api-access-gc8pl\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930811 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-script-lib\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930838 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-log-socket\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930866 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-ovn\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930885 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-bin\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930937 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930959 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-kubelet\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.930996 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931024 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-slash\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931048 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931074 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-var-lib-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931095 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-systemd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931160 5055 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931172 5055 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931185 5055 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-netd\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931197 5055 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931207 5055 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-kubelet\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931218 5055 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-node-log\") on 
node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931228 5055 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-slash\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931241 5055 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931252 5055 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-log-socket\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931262 5055 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-cni-bin\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931272 5055 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-run-netns\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931284 5055 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931297 5055 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931307 5055 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931316 5055 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931328 5055 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.931339 5055 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-systemd-units\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.935788 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.936895 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs" (OuterVolumeSpecName: "kube-api-access-tvpvs") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "kube-api-access-tvpvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:06 crc kubenswrapper[5055]: I1011 07:07:06.943993 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" (UID: "f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.005208 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovnkube-controller/3.log" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.007650 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovn-acl-logging/0.log" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008097 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-5dg24_f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/ovn-controller/0.log" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008555 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008580 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008587 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008593 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008599 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008606 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" exitCode=0 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008613 5055 generic.go:334] "Generic (PLEG): container finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" exitCode=143 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008633 5055 generic.go:334] "Generic (PLEG): container 
finished" podID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" exitCode=143 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008676 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008673 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008739 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008762 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008799 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008818 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008836 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008853 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008869 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008879 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008890 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008899 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008909 
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008909 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008919 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008929 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008938 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008951 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.008971 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009006 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009023 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009033 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009042 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009055 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009067 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009078 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009088 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"}
Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009098 5055 pod_container_deletor.go:114] "Failed to issue the request to
remove container" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009108 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009123 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009141 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009153 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009164 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009174 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009185 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009195 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009204 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009214 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009223 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009233 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009248 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5dg24" event={"ID":"f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad","Type":"ContainerDied","Data":"7fe38a71ba9b53123eaf6c08fa7d39cbb921d8e7613f0ca6c6e71496a83a4b7d"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009265 
5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009285 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009298 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009309 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009319 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009329 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009339 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009349 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009360 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.009370 5055 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.012804 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/2.log" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.013498 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/1.log" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.013590 5055 generic.go:334] "Generic (PLEG): container finished" podID="c2f344f5-5570-4fb6-b59d-5b881cd1d2cc" containerID="f04261c508ee02b5a266697e678b45e465a2d37a77e6fe7b037dea3c5c7aaf65" exitCode=2 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.013650 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerDied","Data":"f04261c508ee02b5a266697e678b45e465a2d37a77e6fe7b037dea3c5c7aaf65"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.013673 5055 pod_container_deletor.go:114] "Failed to issue the request to remove 
container" containerID={"Type":"cri-o","ID":"649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849"} Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.014500 5055 scope.go:117] "RemoveContainer" containerID="f04261c508ee02b5a266697e678b45e465a2d37a77e6fe7b037dea3c5c7aaf65" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035125 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-env-overrides\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035194 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-config\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035226 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-netns\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035262 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-etc-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035284 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-systemd-units\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035304 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-netd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035342 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovn-node-metrics-cert\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035365 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-node-log\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035412 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc8pl\" (UniqueName: 
\"kubernetes.io/projected/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-kube-api-access-gc8pl\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035439 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-script-lib\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035460 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-log-socket\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035497 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-ovn\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035513 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-bin\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035538 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035576 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-kubelet\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035602 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035621 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-slash\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035657 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035679 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-var-lib-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035698 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-systemd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035691 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-node-log\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035759 5055 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-run-systemd\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035806 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvpvs\" (UniqueName: \"kubernetes.io/projected/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-kube-api-access-tvpvs\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.035816 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.036735 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-env-overrides\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037387 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-config\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037437 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-netns\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037479 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-etc-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037508 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-systemd-units\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037535 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-netd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037865 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-kubelet\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037909 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-log-socket\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037941 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-ovn\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.037969 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-cni-bin\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038001 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-run-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038045 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038075 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: 
\"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038104 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-host-slash\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038133 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-var-lib-openvswitch\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038163 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-run-systemd\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.038696 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.039027 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovnkube-script-lib\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.041619 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-ovn-node-metrics-cert\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.059348 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5dg24"] Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.061227 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc8pl\" (UniqueName: \"kubernetes.io/projected/1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7-kube-api-access-gc8pl\") pod \"ovnkube-node-k5sls\" (UID: \"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7\") " pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.063137 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5dg24"] Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.067438 5055 scope.go:117] "RemoveContainer" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.080495 5055 scope.go:117] "RemoveContainer" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.097505 5055 scope.go:117] "RemoveContainer" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.118635 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.120882 5055 scope.go:117] "RemoveContainer" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.138130 5055 scope.go:117] "RemoveContainer" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: W1011 07:07:07.145540 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ec9f1bd_b148_47a0_91ab_fc8b1460b5b7.slice/crio-5d3854f2e333dadf8470e206a46ff7603d74faf00282cb02d096fec35990bbd1 WatchSource:0}: Error finding container 5d3854f2e333dadf8470e206a46ff7603d74faf00282cb02d096fec35990bbd1: Status 404 returned error can't find the container with id 5d3854f2e333dadf8470e206a46ff7603d74faf00282cb02d096fec35990bbd1 Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.152806 5055 scope.go:117] "RemoveContainer" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.175836 5055 scope.go:117] "RemoveContainer" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.189995 5055 scope.go:117] "RemoveContainer" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.204278 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.204783 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.204817 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} err="failed to get container status \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.204842 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.205711 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": container with ID starting with cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae not found: ID does not exist" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.205737 5055 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} err="failed to get container status \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": rpc error: code = NotFound desc = could not find container \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": container with ID starting with cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.205755 5055 scope.go:117] "RemoveContainer" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.206195 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": container with ID starting with 2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f not found: ID does not exist" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.206229 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} err="failed to get container status \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": rpc error: code = NotFound desc = could not find container \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": container with ID starting with 2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.206246 5055 scope.go:117] "RemoveContainer" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.207004 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": container with ID starting with 4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee not found: ID does not exist" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.207034 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} err="failed to get container status \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": rpc error: code = NotFound desc = could not find container \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": container with ID starting with 4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.207303 5055 scope.go:117] "RemoveContainer" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.207656 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": container with ID starting with 3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c not found: ID does not exist" 
containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.207708 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} err="failed to get container status \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": rpc error: code = NotFound desc = could not find container \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": container with ID starting with 3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.207742 5055 scope.go:117] "RemoveContainer" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.208032 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": container with ID starting with 9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870 not found: ID does not exist" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208065 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} err="failed to get container status \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": rpc error: code = NotFound desc = could not find container \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": container with ID starting with 9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208085 5055 scope.go:117] "RemoveContainer" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.208303 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": container with ID starting with cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd not found: ID does not exist" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208331 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} err="failed to get container status \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": rpc error: code = NotFound desc = could not find container \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": container with ID starting with cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208351 5055 scope.go:117] "RemoveContainer" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.208563 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": container with ID starting with 2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861 not found: ID does not exist" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208586 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} err="failed to get container status \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": rpc error: code = NotFound desc = could not find container \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": container with ID starting with 2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208604 5055 scope.go:117] "RemoveContainer" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.208865 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": container with ID starting with 8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd not found: ID does not exist" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208893 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} err="failed to get container status \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": rpc error: code = NotFound desc = could not find container \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": container with ID starting with 8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.208909 5055 scope.go:117] "RemoveContainer" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: E1011 07:07:07.209146 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": container with ID starting with bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242 not found: ID does not exist" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209171 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} err="failed to get container status \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": rpc error: code = NotFound desc = could not find container \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": container with ID starting with bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209187 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc 
kubenswrapper[5055]: I1011 07:07:07.209412 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} err="failed to get container status \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209443 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209639 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} err="failed to get container status \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": rpc error: code = NotFound desc = could not find container \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": container with ID starting with cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209673 5055 scope.go:117] "RemoveContainer" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209893 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} err="failed to get container status \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": rpc error: code = NotFound desc = could not find container \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": container with ID starting with 2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.209918 5055 scope.go:117] "RemoveContainer" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210090 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} err="failed to get container status \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": rpc error: code = NotFound desc = could not find container \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": container with ID starting with 4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210114 5055 scope.go:117] "RemoveContainer" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210297 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} err="failed to get container status \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": rpc error: code = NotFound desc = could not find container \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": container with ID 
starting with 3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210324 5055 scope.go:117] "RemoveContainer" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210504 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} err="failed to get container status \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": rpc error: code = NotFound desc = could not find container \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": container with ID starting with 9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210526 5055 scope.go:117] "RemoveContainer" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210682 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} err="failed to get container status \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": rpc error: code = NotFound desc = could not find container \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": container with ID starting with cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210702 5055 scope.go:117] "RemoveContainer" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210943 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} err="failed to get container status \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": rpc error: code = NotFound desc = could not find container \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": container with ID starting with 2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.210965 5055 scope.go:117] "RemoveContainer" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211112 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} err="failed to get container status \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": rpc error: code = NotFound desc = could not find container \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": container with ID starting with 8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211129 5055 scope.go:117] "RemoveContainer" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211308 5055 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} err="failed to get container status \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": rpc error: code = NotFound desc = could not find container \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": container with ID starting with bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211333 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211498 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} err="failed to get container status \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211516 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211694 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} err="failed to get container status \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": rpc error: code = NotFound desc = could not find container \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": container with ID starting with cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211717 5055 scope.go:117] "RemoveContainer" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211933 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} err="failed to get container status \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": rpc error: code = NotFound desc = could not find container \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": container with ID starting with 2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.211952 5055 scope.go:117] "RemoveContainer" containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.212117 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} err="failed to get container status \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": rpc error: code = NotFound desc = could not find container \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": container with ID starting with 4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee not found: ID does not exist" Oct 
11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.212141 5055 scope.go:117] "RemoveContainer" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.212410 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} err="failed to get container status \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": rpc error: code = NotFound desc = could not find container \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": container with ID starting with 3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.212434 5055 scope.go:117] "RemoveContainer" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.214934 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} err="failed to get container status \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": rpc error: code = NotFound desc = could not find container \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": container with ID starting with 9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.214962 5055 scope.go:117] "RemoveContainer" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.215686 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} err="failed to get container status \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": rpc error: code = NotFound desc = could not find container \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": container with ID starting with cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.215718 5055 scope.go:117] "RemoveContainer" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216080 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} err="failed to get container status \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": rpc error: code = NotFound desc = could not find container \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": container with ID starting with 2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216111 5055 scope.go:117] "RemoveContainer" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216456 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} err="failed to get container status 
\"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": rpc error: code = NotFound desc = could not find container \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": container with ID starting with 8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216477 5055 scope.go:117] "RemoveContainer" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216812 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} err="failed to get container status \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": rpc error: code = NotFound desc = could not find container \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": container with ID starting with bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.216838 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217126 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} err="failed to get container status \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217155 5055 scope.go:117] "RemoveContainer" containerID="cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217405 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae"} err="failed to get container status \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": rpc error: code = NotFound desc = could not find container \"cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae\": container with ID starting with cb4d181e3337aed49eb2b4b07be2677e74cd198124d724db45f6a57c176b19ae not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217424 5055 scope.go:117] "RemoveContainer" containerID="2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217735 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f"} err="failed to get container status \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": rpc error: code = NotFound desc = could not find container \"2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f\": container with ID starting with 2b6fb85711838faefc7b86c5fc837b038cf8eb5f80fb833acf2f8c5bf39e799f not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.217757 5055 scope.go:117] "RemoveContainer" 
containerID="4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218012 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee"} err="failed to get container status \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": rpc error: code = NotFound desc = could not find container \"4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee\": container with ID starting with 4c811385027b4155078deb167dbc48c903cc1dcdd69884c9d9a60ca8704344ee not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218033 5055 scope.go:117] "RemoveContainer" containerID="3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218351 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c"} err="failed to get container status \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": rpc error: code = NotFound desc = could not find container \"3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c\": container with ID starting with 3870883aee3e50855023a47f9b715770fd6d8b66f99de7373d5a26faecc9531c not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218375 5055 scope.go:117] "RemoveContainer" containerID="9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218601 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870"} err="failed to get container status \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": rpc error: code = NotFound desc = could not find container \"9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870\": container with ID starting with 9feb58ca65e85678e495e47f18a1855b5d226034d4afebc0f60a628ede701870 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218625 5055 scope.go:117] "RemoveContainer" containerID="cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218963 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd"} err="failed to get container status \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": rpc error: code = NotFound desc = could not find container \"cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd\": container with ID starting with cd0c09623e0275a75f30e930b18e36db9f9c8b1f29dc1c25d31ee3e1ea5ed5cd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.218983 5055 scope.go:117] "RemoveContainer" containerID="2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219193 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861"} err="failed to get container status \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": rpc error: code = NotFound desc = could not find 
container \"2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861\": container with ID starting with 2ff9dc481ab6ab6d734ee5afc71a9291ff479f72e8b0b1e269b31e4fa0a6e861 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219215 5055 scope.go:117] "RemoveContainer" containerID="8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219557 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd"} err="failed to get container status \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": rpc error: code = NotFound desc = could not find container \"8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd\": container with ID starting with 8ddf98644e877905c457473ecad8c086ad5b5ad06a850999da51e99eb7cf53fd not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219581 5055 scope.go:117] "RemoveContainer" containerID="bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219888 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242"} err="failed to get container status \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": rpc error: code = NotFound desc = could not find container \"bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242\": container with ID starting with bd22c99c4a54f6878a83207dcbeb4b3d8b4cfe3ce5752322fa30d7aca9938242 not found: ID does not exist" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.219907 5055 scope.go:117] "RemoveContainer" containerID="6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172" Oct 11 07:07:07 crc kubenswrapper[5055]: I1011 07:07:07.220159 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172"} err="failed to get container status \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": rpc error: code = NotFound desc = could not find container \"6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172\": container with ID starting with 6cbb35f767e4c20d29e1f5d5f43a6f18890199c65f841a02ed95f507eeec3172 not found: ID does not exist" Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.020713 5055 generic.go:334] "Generic (PLEG): container finished" podID="1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7" containerID="457a51f6659b6ee3cf1902e0188d95bd42bcf2ccd26a707cfbe9ff3913f79155" exitCode=0 Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.020826 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerDied","Data":"457a51f6659b6ee3cf1902e0188d95bd42bcf2ccd26a707cfbe9ff3913f79155"} Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.021067 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"5d3854f2e333dadf8470e206a46ff7603d74faf00282cb02d096fec35990bbd1"} Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.025603 5055 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/2.log" Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.026313 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/1.log" Oct 11 07:07:08 crc kubenswrapper[5055]: I1011 07:07:08.026350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-4lplf" event={"ID":"c2f344f5-5570-4fb6-b59d-5b881cd1d2cc","Type":"ContainerStarted","Data":"8e6212b5d60413fcde0eedbcc56df3c4a8b1c1a53bcbd25f602f8ccff555e2ac"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.000981 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad" path="/var/lib/kubelet/pods/f87c4acd-8aee-4f7c-b7b0-49b7b43d9cad/volumes" Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035086 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"17cde0f3448be346a7a53b923df1692385e660edff0794f9e3d54d41f4241427"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035134 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"473936aae355cc1a83d45fc987311cb4d16c69322d9849ce370d73e987039254"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035147 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"d976330edcedf09b5be825ffec33f677ce037a121141cc8edb3ef0521a1ef4a4"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035158 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"1ed6fb0a390ec05dba036416d3bbbebca85cc4bf3bec0596431609082d707fd6"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035168 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"481c1453961a4530bf78c180ede879e3c931d4178618dc2eaea304f8db27bfe1"} Oct 11 07:07:09 crc kubenswrapper[5055]: I1011 07:07:09.035179 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"4ca0edec0f8a33abd9d730a4bce753c83ec4f004e0a959a15cf28814eca93514"} Oct 11 07:07:11 crc kubenswrapper[5055]: I1011 07:07:11.053338 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"2a0124594705c908878e1d9ecb7aada599a6df05d04b819585c79c75fbb27981"} Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.074559 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" event={"ID":"1ec9f1bd-b148-47a0-91ab-fc8b1460b5b7","Type":"ContainerStarted","Data":"26142d2492ddaaf7e494905d6c8503cab583c3a5e44d0d1d8be10405edd1ce32"} Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.075217 5055 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.075293 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.075355 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.128616 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" podStartSLOduration=8.12859875 podStartE2EDuration="8.12859875s" podCreationTimestamp="2025-10-11 07:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:07:14.125501692 +0000 UTC m=+817.899775499" watchObservedRunningTime="2025-10-11 07:07:14.12859875 +0000 UTC m=+817.902872577" Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.157204 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:14 crc kubenswrapper[5055]: I1011 07:07:14.168146 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.283421 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-x67nd"] Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.286266 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.290648 5055 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-d9cnx" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.293499 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.293788 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.298864 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.300557 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x67nd"] Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.355958 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.356912 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.356958 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br5bn\" (UniqueName: 
\"kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.458366 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.458422 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br5bn\" (UniqueName: \"kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.458453 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.458716 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.459096 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.476140 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br5bn\" (UniqueName: \"kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn\") pod \"crc-storage-crc-x67nd\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.613176 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.985319 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x67nd"] Oct 11 07:07:17 crc kubenswrapper[5055]: W1011 07:07:17.992688 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod213b78e7_6a38_44b0_8fba_b702b7455bb0.slice/crio-b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1 WatchSource:0}: Error finding container b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1: Status 404 returned error can't find the container with id b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1 Oct 11 07:07:17 crc kubenswrapper[5055]: I1011 07:07:17.994654 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:07:18 crc kubenswrapper[5055]: I1011 07:07:18.094533 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x67nd" event={"ID":"213b78e7-6a38-44b0-8fba-b702b7455bb0","Type":"ContainerStarted","Data":"b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1"} Oct 11 07:07:19 crc kubenswrapper[5055]: I1011 07:07:19.102501 5055 generic.go:334] "Generic (PLEG): container finished" podID="213b78e7-6a38-44b0-8fba-b702b7455bb0" containerID="4d3feae4258e3893f934d778db84000cdfd2e9358d19f3911c43a48651d6c923" exitCode=0 Oct 11 07:07:19 crc kubenswrapper[5055]: I1011 07:07:19.102726 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x67nd" event={"ID":"213b78e7-6a38-44b0-8fba-b702b7455bb0","Type":"ContainerDied","Data":"4d3feae4258e3893f934d778db84000cdfd2e9358d19f3911c43a48651d6c923"} Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.347618 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.492381 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br5bn\" (UniqueName: \"kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn\") pod \"213b78e7-6a38-44b0-8fba-b702b7455bb0\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.492456 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage\") pod \"213b78e7-6a38-44b0-8fba-b702b7455bb0\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.492543 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt\") pod \"213b78e7-6a38-44b0-8fba-b702b7455bb0\" (UID: \"213b78e7-6a38-44b0-8fba-b702b7455bb0\") " Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.492659 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "213b78e7-6a38-44b0-8fba-b702b7455bb0" (UID: "213b78e7-6a38-44b0-8fba-b702b7455bb0"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.492857 5055 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/213b78e7-6a38-44b0-8fba-b702b7455bb0-node-mnt\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.496887 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn" (OuterVolumeSpecName: "kube-api-access-br5bn") pod "213b78e7-6a38-44b0-8fba-b702b7455bb0" (UID: "213b78e7-6a38-44b0-8fba-b702b7455bb0"). InnerVolumeSpecName "kube-api-access-br5bn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.506964 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "213b78e7-6a38-44b0-8fba-b702b7455bb0" (UID: "213b78e7-6a38-44b0-8fba-b702b7455bb0"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.594227 5055 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/213b78e7-6a38-44b0-8fba-b702b7455bb0-crc-storage\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:20 crc kubenswrapper[5055]: I1011 07:07:20.594266 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br5bn\" (UniqueName: \"kubernetes.io/projected/213b78e7-6a38-44b0-8fba-b702b7455bb0-kube-api-access-br5bn\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:21 crc kubenswrapper[5055]: I1011 07:07:21.113398 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x67nd" event={"ID":"213b78e7-6a38-44b0-8fba-b702b7455bb0","Type":"ContainerDied","Data":"b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1"} Oct 11 07:07:21 crc kubenswrapper[5055]: I1011 07:07:21.113434 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9e5d16a4a450247b7ae865bbd862669ea758b094bcee6986067182b25305ef1" Oct 11 07:07:21 crc kubenswrapper[5055]: I1011 07:07:21.114073 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x67nd" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.853073 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:25 crc kubenswrapper[5055]: E1011 07:07:25.853399 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213b78e7-6a38-44b0-8fba-b702b7455bb0" containerName="storage" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.853418 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="213b78e7-6a38-44b0-8fba-b702b7455bb0" containerName="storage" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.853617 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="213b78e7-6a38-44b0-8fba-b702b7455bb0" containerName="storage" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.855116 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.864899 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.967917 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.967972 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:25 crc kubenswrapper[5055]: I1011 07:07:25.968040 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc6dz\" (UniqueName: \"kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.069733 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.070191 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc6dz\" (UniqueName: \"kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.070225 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.071051 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.071342 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.092835 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dc6dz\" (UniqueName: \"kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz\") pod \"redhat-operators-rlrfg\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.228182 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.542449 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.543074 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8n7bz" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="registry-server" containerID="cri-o://4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7" gracePeriod=30 Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.547526 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.547821 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bqxlg" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="registry-server" containerID="cri-o://c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672" gracePeriod=30 Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.582889 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.583110 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" podUID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" containerName="marketplace-operator" containerID="cri-o://a387875d6d84f6bee6a3f27d2e82d22a879757e89df7b12b003953fdfa9251e2" gracePeriod=30 Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.584285 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.584619 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gbjgh" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="registry-server" containerID="cri-o://3c02583cfa1ad23f58a6e7cc23c4ddb2dd55a0c305c5c9539076186e7911e8a0" gracePeriod=30 Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.588026 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9qmg4"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.588669 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.599835 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9qmg4"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.603411 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.615125 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.615418 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rm5wf" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="registry-server" containerID="cri-o://a2c471ac93fa17c491ea757c3212a201fb7e06b5d36c6ce1bf0ebb57518f111d" gracePeriod=30 Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.647343 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.676438 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.676503 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsp9q\" (UniqueName: \"kubernetes.io/projected/7530e962-328a-4b9c-8a07-c2f055845eda-kube-api-access-nsp9q\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.676527 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.777698 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsp9q\" (UniqueName: \"kubernetes.io/projected/7530e962-328a-4b9c-8a07-c2f055845eda-kube-api-access-nsp9q\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.777744 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.777835 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.780320 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.781807 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7530e962-328a-4b9c-8a07-c2f055845eda-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.795684 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsp9q\" (UniqueName: \"kubernetes.io/projected/7530e962-328a-4b9c-8a07-c2f055845eda-kube-api-access-nsp9q\") pod \"marketplace-operator-79b997595-9qmg4\" (UID: \"7530e962-328a-4b9c-8a07-c2f055845eda\") " pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:26 crc kubenswrapper[5055]: I1011 07:07:26.912214 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.013653 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.075415 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.082032 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content\") pod \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.082087 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities\") pod \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.082184 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8nbk\" (UniqueName: \"kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk\") pod \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\" (UID: \"3d9e3839-fc39-4efc-9aaf-ff4554f45935\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.083448 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities" (OuterVolumeSpecName: "utilities") pod "3d9e3839-fc39-4efc-9aaf-ff4554f45935" (UID: "3d9e3839-fc39-4efc-9aaf-ff4554f45935"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.090343 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk" (OuterVolumeSpecName: "kube-api-access-n8nbk") pod "3d9e3839-fc39-4efc-9aaf-ff4554f45935" (UID: "3d9e3839-fc39-4efc-9aaf-ff4554f45935"). InnerVolumeSpecName "kube-api-access-n8nbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.146643 5055 generic.go:334] "Generic (PLEG): container finished" podID="8291e538-31d8-4d0e-baea-db4a67872431" containerID="21cf0367f513b7030bb057e1275ace0dae625acd73532fa9881fc777f9098902" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.146708 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rlrfg" event={"ID":"8291e538-31d8-4d0e-baea-db4a67872431","Type":"ContainerDied","Data":"21cf0367f513b7030bb057e1275ace0dae625acd73532fa9881fc777f9098902"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.146734 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rlrfg" event={"ID":"8291e538-31d8-4d0e-baea-db4a67872431","Type":"ContainerStarted","Data":"d33b782582d3c2ecda43ad9faf015cccb28e3f97f10045111464abd03a14669a"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.150176 5055 generic.go:334] "Generic (PLEG): container finished" podID="05199d34-a391-4e72-b9b3-7864f988c137" containerID="a2c471ac93fa17c491ea757c3212a201fb7e06b5d36c6ce1bf0ebb57518f111d" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.150227 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerDied","Data":"a2c471ac93fa17c491ea757c3212a201fb7e06b5d36c6ce1bf0ebb57518f111d"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.153422 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerID="c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.153469 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerDied","Data":"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.153486 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bqxlg" event={"ID":"3d9e3839-fc39-4efc-9aaf-ff4554f45935","Type":"ContainerDied","Data":"2aff22011331f5a8876481c7a72f6b9c4ea37993d9a26907abcacea7bf94b40f"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.153508 5055 scope.go:117] "RemoveContainer" containerID="c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.153596 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bqxlg" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.156827 5055 generic.go:334] "Generic (PLEG): container finished" podID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerID="4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.156876 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerDied","Data":"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.156892 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8n7bz" event={"ID":"0e7a03ef-3f5d-43be-9dd4-d375c36898e9","Type":"ContainerDied","Data":"e98ca9a0c7a18847491cd4f0edc70d0fc4f2481e6e46b1db34ef6d0227a162da"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.156934 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8n7bz" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.161798 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d9e3839-fc39-4efc-9aaf-ff4554f45935" (UID: "3d9e3839-fc39-4efc-9aaf-ff4554f45935"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.169340 5055 generic.go:334] "Generic (PLEG): container finished" podID="76714795-e16a-4a12-adca-70e428270dd0" containerID="3c02583cfa1ad23f58a6e7cc23c4ddb2dd55a0c305c5c9539076186e7911e8a0" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.169372 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerDied","Data":"3c02583cfa1ad23f58a6e7cc23c4ddb2dd55a0c305c5c9539076186e7911e8a0"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.169416 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gbjgh" event={"ID":"76714795-e16a-4a12-adca-70e428270dd0","Type":"ContainerDied","Data":"7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.169426 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f3ce7e7f176d638fa81a4d88adec6b0b7d4a8e7db4eb8f1c7bb3700eb3cab48" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.172405 5055 generic.go:334] "Generic (PLEG): container finished" podID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" containerID="a387875d6d84f6bee6a3f27d2e82d22a879757e89df7b12b003953fdfa9251e2" exitCode=0 Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.172444 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" event={"ID":"1821ec16-ef47-498d-a4cf-f72b5afcc7b0","Type":"ContainerDied","Data":"a387875d6d84f6bee6a3f27d2e82d22a879757e89df7b12b003953fdfa9251e2"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.172464 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" 
event={"ID":"1821ec16-ef47-498d-a4cf-f72b5afcc7b0","Type":"ContainerDied","Data":"3c5dcad0b53816b9d89a626764b048e30f6256c9fd146b6643696126113e6e51"} Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.172477 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c5dcad0b53816b9d89a626764b048e30f6256c9fd146b6643696126113e6e51" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183325 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content\") pod \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183371 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities\") pod \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183443 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6z7h\" (UniqueName: \"kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h\") pod \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\" (UID: \"0e7a03ef-3f5d-43be-9dd4-d375c36898e9\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183625 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8nbk\" (UniqueName: \"kubernetes.io/projected/3d9e3839-fc39-4efc-9aaf-ff4554f45935-kube-api-access-n8nbk\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183645 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.183654 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9e3839-fc39-4efc-9aaf-ff4554f45935-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.184796 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities" (OuterVolumeSpecName: "utilities") pod "0e7a03ef-3f5d-43be-9dd4-d375c36898e9" (UID: "0e7a03ef-3f5d-43be-9dd4-d375c36898e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.187147 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h" (OuterVolumeSpecName: "kube-api-access-g6z7h") pod "0e7a03ef-3f5d-43be-9dd4-d375c36898e9" (UID: "0e7a03ef-3f5d-43be-9dd4-d375c36898e9"). InnerVolumeSpecName "kube-api-access-g6z7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.230016 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e7a03ef-3f5d-43be-9dd4-d375c36898e9" (UID: "0e7a03ef-3f5d-43be-9dd4-d375c36898e9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.255230 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.262198 5055 scope.go:117] "RemoveContainer" containerID="abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.268134 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.279098 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284265 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content\") pod \"76714795-e16a-4a12-adca-70e428270dd0\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284323 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca\") pod \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284363 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpg7x\" (UniqueName: \"kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x\") pod \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284392 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb\") pod \"76714795-e16a-4a12-adca-70e428270dd0\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284415 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities\") pod \"76714795-e16a-4a12-adca-70e428270dd0\" (UID: \"76714795-e16a-4a12-adca-70e428270dd0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284443 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics\") pod \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\" (UID: \"1821ec16-ef47-498d-a4cf-f72b5afcc7b0\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284653 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284664 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-utilities\") on node \"crc\" DevicePath \"\"" 
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.284673 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6z7h\" (UniqueName: \"kubernetes.io/projected/0e7a03ef-3f5d-43be-9dd4-d375c36898e9-kube-api-access-g6z7h\") on node \"crc\" DevicePath \"\""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.285547 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "1821ec16-ef47-498d-a4cf-f72b5afcc7b0" (UID: "1821ec16-ef47-498d-a4cf-f72b5afcc7b0"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.285804 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities" (OuterVolumeSpecName: "utilities") pod "76714795-e16a-4a12-adca-70e428270dd0" (UID: "76714795-e16a-4a12-adca-70e428270dd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.287600 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x" (OuterVolumeSpecName: "kube-api-access-kpg7x") pod "1821ec16-ef47-498d-a4cf-f72b5afcc7b0" (UID: "1821ec16-ef47-498d-a4cf-f72b5afcc7b0"). InnerVolumeSpecName "kube-api-access-kpg7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.288831 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb" (OuterVolumeSpecName: "kube-api-access-qfnbb") pod "76714795-e16a-4a12-adca-70e428270dd0" (UID: "76714795-e16a-4a12-adca-70e428270dd0"). InnerVolumeSpecName "kube-api-access-qfnbb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.290429 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "1821ec16-ef47-498d-a4cf-f72b5afcc7b0" (UID: "1821ec16-ef47-498d-a4cf-f72b5afcc7b0"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.293098 5055 scope.go:117] "RemoveContainer" containerID="e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.300120 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76714795-e16a-4a12-adca-70e428270dd0" (UID: "76714795-e16a-4a12-adca-70e428270dd0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.326323 5055 scope.go:117] "RemoveContainer" containerID="c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.326632 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672\": container with ID starting with c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672 not found: ID does not exist" containerID="c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.326690 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672"} err="failed to get container status \"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672\": rpc error: code = NotFound desc = could not find container \"c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672\": container with ID starting with c9f4446a16fede46b698c9689ec30937c847a994d29f342eb136e4eb77129672 not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.326720 5055 scope.go:117] "RemoveContainer" containerID="abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.327087 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f\": container with ID starting with abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f not found: ID does not exist" containerID="abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.327116 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f"} err="failed to get container status \"abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f\": rpc error: code = NotFound desc = could not find container \"abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f\": container with ID starting with abe838f65da8c18d6a01688ebbfeb37af5bab36e86ea8ff4930276acbf40b75f not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.327134 5055 scope.go:117] "RemoveContainer" containerID="e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.327441 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc\": container with ID starting with e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc not found: ID does not exist" containerID="e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.327469 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc"} err="failed to get container status \"e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc\": rpc error: code = NotFound desc = could not find container \"e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc\": container with ID starting with e347b53d8923f7d7a91e5016e38c6e7e48c4e39520eb709923756644a08f5dfc not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.327486 5055 scope.go:117] "RemoveContainer" containerID="4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.330229 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9qmg4"]
Oct 11 07:07:27 crc kubenswrapper[5055]: W1011 07:07:27.334414 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7530e962_328a_4b9c_8a07_c2f055845eda.slice/crio-a4bc5c8b520522d5710be514594d0c31f4ffb672e4300102e68bbddef0b5c4ba WatchSource:0}: Error finding container a4bc5c8b520522d5710be514594d0c31f4ffb672e4300102e68bbddef0b5c4ba: Status 404 returned error can't find the container with id a4bc5c8b520522d5710be514594d0c31f4ffb672e4300102e68bbddef0b5c4ba
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.347832 5055 scope.go:117] "RemoveContainer" containerID="f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.353892 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rlrfg"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.363806 5055 scope.go:117] "RemoveContainer" containerID="bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.379477 5055 scope.go:117] "RemoveContainer" containerID="4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.380122 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7\": container with ID starting with 4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7 not found: ID does not exist" containerID="4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.380158 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7"} err="failed to get container status \"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7\": rpc error: code = NotFound desc = could not find container \"4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7\": container with ID starting with 4d0dc7e35f7464a6caae8fd6d615f772cffc64a603c98080519ebf4e4413c6c7 not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.380182 5055 scope.go:117] "RemoveContainer" containerID="f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.380504 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44\": container with ID starting with f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44 not found: ID does not exist" containerID="f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.380555 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44"} err="failed to get container status \"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44\": rpc error: code = NotFound desc = could not find container \"f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44\": container with ID starting with f6a550b63b129e7d91f1a976b8b51c44316c82fa97f31d737401bf4659034c44 not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.380589 5055 scope.go:117] "RemoveContainer" containerID="bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c"
Oct 11 07:07:27 crc kubenswrapper[5055]: E1011 07:07:27.381886 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c\": container with ID starting with bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c not found: ID does not exist" containerID="bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.381919 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c"} err="failed to get container status \"bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c\": rpc error: code = NotFound desc = could not find container \"bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c\": container with ID starting with bdfbf6499eacebbd2302df4494462b6af1ba11994292a36742a125c15251af6c not found: ID does not exist"
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385343 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdsfs\" (UniqueName: \"kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs\") pod \"05199d34-a391-4e72-b9b3-7864f988c137\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") "
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385395 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc6dz\" (UniqueName: \"kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz\") pod \"8291e538-31d8-4d0e-baea-db4a67872431\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") "
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385423 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities\") pod \"05199d34-a391-4e72-b9b3-7864f988c137\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") "
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385443 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content\") pod \"8291e538-31d8-4d0e-baea-db4a67872431\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") "
Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385461 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities\") pod \"8291e538-31d8-4d0e-baea-db4a67872431\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") "
\"8291e538-31d8-4d0e-baea-db4a67872431\" (UID: \"8291e538-31d8-4d0e-baea-db4a67872431\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385533 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content\") pod \"05199d34-a391-4e72-b9b3-7864f988c137\" (UID: \"05199d34-a391-4e72-b9b3-7864f988c137\") " Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385732 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385744 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpg7x\" (UniqueName: \"kubernetes.io/projected/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-kube-api-access-kpg7x\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385753 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfnbb\" (UniqueName: \"kubernetes.io/projected/76714795-e16a-4a12-adca-70e428270dd0-kube-api-access-qfnbb\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385775 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385783 5055 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1821ec16-ef47-498d-a4cf-f72b5afcc7b0-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385791 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76714795-e16a-4a12-adca-70e428270dd0-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.385873 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8291e538-31d8-4d0e-baea-db4a67872431" (UID: "8291e538-31d8-4d0e-baea-db4a67872431"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.386119 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities" (OuterVolumeSpecName: "utilities") pod "05199d34-a391-4e72-b9b3-7864f988c137" (UID: "05199d34-a391-4e72-b9b3-7864f988c137"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.386397 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities" (OuterVolumeSpecName: "utilities") pod "8291e538-31d8-4d0e-baea-db4a67872431" (UID: "8291e538-31d8-4d0e-baea-db4a67872431"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.387728 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs" (OuterVolumeSpecName: "kube-api-access-xdsfs") pod "05199d34-a391-4e72-b9b3-7864f988c137" (UID: "05199d34-a391-4e72-b9b3-7864f988c137"). InnerVolumeSpecName "kube-api-access-xdsfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.387975 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz" (OuterVolumeSpecName: "kube-api-access-dc6dz") pod "8291e538-31d8-4d0e-baea-db4a67872431" (UID: "8291e538-31d8-4d0e-baea-db4a67872431"). InnerVolumeSpecName "kube-api-access-dc6dz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.465023 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05199d34-a391-4e72-b9b3-7864f988c137" (UID: "05199d34-a391-4e72-b9b3-7864f988c137"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487283 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdsfs\" (UniqueName: \"kubernetes.io/projected/05199d34-a391-4e72-b9b3-7864f988c137-kube-api-access-xdsfs\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487309 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc6dz\" (UniqueName: \"kubernetes.io/projected/8291e538-31d8-4d0e-baea-db4a67872431-kube-api-access-dc6dz\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487333 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487342 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487350 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8291e538-31d8-4d0e-baea-db4a67872431-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487358 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05199d34-a391-4e72-b9b3-7864f988c137-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.487374 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.494889 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bqxlg"] Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.498599 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 07:07:27 crc kubenswrapper[5055]: I1011 07:07:27.502487 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8n7bz"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.180445 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rm5wf" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.180413 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rm5wf" event={"ID":"05199d34-a391-4e72-b9b3-7864f988c137","Type":"ContainerDied","Data":"3a18833b632dd2568e2a30c9f2eb2dbbd7ee899f0975b74da41c77522f294dd9"} Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.180700 5055 scope.go:117] "RemoveContainer" containerID="a2c471ac93fa17c491ea757c3212a201fb7e06b5d36c6ce1bf0ebb57518f111d" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.181901 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" event={"ID":"7530e962-328a-4b9c-8a07-c2f055845eda","Type":"ContainerStarted","Data":"a5eed28819baef8f2963f8475794940f85815204e0913d9e0f90bbae4751c70b"} Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.181930 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" event={"ID":"7530e962-328a-4b9c-8a07-c2f055845eda","Type":"ContainerStarted","Data":"a4bc5c8b520522d5710be514594d0c31f4ffb672e4300102e68bbddef0b5c4ba"} Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.183243 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.185527 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.189348 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rlrfg" event={"ID":"8291e538-31d8-4d0e-baea-db4a67872431","Type":"ContainerDied","Data":"d33b782582d3c2ecda43ad9faf015cccb28e3f97f10045111464abd03a14669a"} Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.189391 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gbjgh" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.189389 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-slk95" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.198975 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rlrfg" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.202390 5055 scope.go:117] "RemoveContainer" containerID="c5071fbb1ae6940a3a0444eb28d2fc65eb1cfc967e2d7770a101ceb19030b6ea" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.213316 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9qmg4" podStartSLOduration=2.2132927589999998 podStartE2EDuration="2.213292759s" podCreationTimestamp="2025-10-11 07:07:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:07:28.209912053 +0000 UTC m=+831.984185890" watchObservedRunningTime="2025-10-11 07:07:28.213292759 +0000 UTC m=+831.987566566" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.242457 5055 scope.go:117] "RemoveContainer" containerID="ae71dd4ef5e4096a53b0e4c8765127c64b27e649e354a8433d403aed89a16372" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.250856 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.253534 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rm5wf"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.259376 5055 scope.go:117] "RemoveContainer" containerID="21cf0367f513b7030bb057e1275ace0dae625acd73532fa9881fc777f9098902" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.287153 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.287580 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rlrfg"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.297935 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.301109 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-slk95"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.309640 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.311863 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gbjgh"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837340 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w4m8t"] Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837541 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837556 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837570 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837580 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" 
containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837594 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" containerName="marketplace-operator" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837602 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" containerName="marketplace-operator" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837612 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837622 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837641 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837651 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837664 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837674 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837690 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837700 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837716 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837726 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837740 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837750 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837785 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837797 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837814 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837861 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837877 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8291e538-31d8-4d0e-baea-db4a67872431" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837887 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8291e538-31d8-4d0e-baea-db4a67872431" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837908 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837918 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: E1011 07:07:28.837931 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.837940 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="extract-content" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838057 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8291e538-31d8-4d0e-baea-db4a67872431" containerName="extract-utilities" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838072 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="05199d34-a391-4e72-b9b3-7864f988c137" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838089 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" containerName="marketplace-operator" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838105 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838118 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.838135 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="76714795-e16a-4a12-adca-70e428270dd0" containerName="registry-server" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.839125 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.841221 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.847565 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4m8t"] Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.908632 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbvmm\" (UniqueName: \"kubernetes.io/projected/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-kube-api-access-xbvmm\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.908720 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-utilities\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.908886 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-catalog-content\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:28 crc kubenswrapper[5055]: I1011 07:07:28.999615 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05199d34-a391-4e72-b9b3-7864f988c137" path="/var/lib/kubelet/pods/05199d34-a391-4e72-b9b3-7864f988c137/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.000238 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e7a03ef-3f5d-43be-9dd4-d375c36898e9" path="/var/lib/kubelet/pods/0e7a03ef-3f5d-43be-9dd4-d375c36898e9/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.000837 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1821ec16-ef47-498d-a4cf-f72b5afcc7b0" path="/var/lib/kubelet/pods/1821ec16-ef47-498d-a4cf-f72b5afcc7b0/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.001790 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9e3839-fc39-4efc-9aaf-ff4554f45935" path="/var/lib/kubelet/pods/3d9e3839-fc39-4efc-9aaf-ff4554f45935/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.002354 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76714795-e16a-4a12-adca-70e428270dd0" path="/var/lib/kubelet/pods/76714795-e16a-4a12-adca-70e428270dd0/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.003345 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8291e538-31d8-4d0e-baea-db4a67872431" path="/var/lib/kubelet/pods/8291e538-31d8-4d0e-baea-db4a67872431/volumes" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.010615 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-catalog-content\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " 
pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.010696 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbvmm\" (UniqueName: \"kubernetes.io/projected/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-kube-api-access-xbvmm\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.010735 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-utilities\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.011145 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-utilities\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.011433 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-catalog-content\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.036145 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbvmm\" (UniqueName: \"kubernetes.io/projected/b4326943-ad56-4da8-b6fd-df9d1f5b5cf6-kube-api-access-xbvmm\") pod \"certified-operators-w4m8t\" (UID: \"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6\") " pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.163066 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.246141 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.252547 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.313726 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g52hj\" (UniqueName: \"kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.313835 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.313880 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.335192 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.414786 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.414834 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.414884 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g52hj\" (UniqueName: \"kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.415208 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.415227 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.441972 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-g52hj\" (UniqueName: \"kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj\") pod \"certified-operators-6r9kk\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.601755 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w4m8t"] Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.608050 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.789546 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:29 crc kubenswrapper[5055]: W1011 07:07:29.822639 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b84125d_bd97_477f_8a8f_8a7d29f787ff.slice/crio-58d78266391fe4fe76a962475704cef704c3dde3abc8e9eb6cc55621da5ecea8 WatchSource:0}: Error finding container 58d78266391fe4fe76a962475704cef704c3dde3abc8e9eb6cc55621da5ecea8: Status 404 returned error can't find the container with id 58d78266391fe4fe76a962475704cef704c3dde3abc8e9eb6cc55621da5ecea8 Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.838168 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.839071 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.841250 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.852951 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.919367 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.919416 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:29 crc kubenswrapper[5055]: I1011 07:07:29.919469 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tlmv\" (UniqueName: \"kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.020520 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tlmv\" (UniqueName: 
\"kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.020602 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.020628 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.021749 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.022430 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.035909 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tlmv\" (UniqueName: \"kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv\") pod \"community-operators-vlcm5\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.154683 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.204428 5055 generic.go:334] "Generic (PLEG): container finished" podID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerID="508adafe0afe9a414693aaeed1f87763f39c3731c1b4ac29559abdcc8203f5f4" exitCode=0 Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.204554 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerDied","Data":"508adafe0afe9a414693aaeed1f87763f39c3731c1b4ac29559abdcc8203f5f4"} Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.204903 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerStarted","Data":"58d78266391fe4fe76a962475704cef704c3dde3abc8e9eb6cc55621da5ecea8"} Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.210457 5055 generic.go:334] "Generic (PLEG): container finished" podID="b4326943-ad56-4da8-b6fd-df9d1f5b5cf6" containerID="30cdb0b8ea8c112525fc47c34e5369efe80a33786eda8cdab666bbb1dc575b4a" exitCode=0 Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.210865 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4m8t" event={"ID":"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6","Type":"ContainerDied","Data":"30cdb0b8ea8c112525fc47c34e5369efe80a33786eda8cdab666bbb1dc575b4a"} Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.210888 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4m8t" event={"ID":"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6","Type":"ContainerStarted","Data":"c8aab37613eaf3cef8b6a48ec703d90ac786c0500d7fd6c7f39d5db4ab5a8c68"} Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.256046 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.257901 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.281644 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.328614 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vccnm\" (UniqueName: \"kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.328911 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.328990 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.357294 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:07:30 crc kubenswrapper[5055]: W1011 07:07:30.362310 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24acda22_b4e6_4e24_b340_4595ce200507.slice/crio-16fe314f6f622d08f860191a72db515e53b71e314766c345121346f1d1d1a3a3 WatchSource:0}: Error finding container 16fe314f6f622d08f860191a72db515e53b71e314766c345121346f1d1d1a3a3: Status 404 returned error can't find the container with id 16fe314f6f622d08f860191a72db515e53b71e314766c345121346f1d1d1a3a3 Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.430248 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.430314 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vccnm\" (UniqueName: \"kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.430353 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.430695 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.430949 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.447095 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vccnm\" (UniqueName: \"kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm\") pod \"community-operators-c6r6g\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.581257 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:30 crc kubenswrapper[5055]: I1011 07:07:30.965057 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.216754 5055 generic.go:334] "Generic (PLEG): container finished" podID="24acda22-b4e6-4e24-b340-4595ce200507" containerID="5b8e123f417beb8f1342f088f0a56b39bef8e1f6fcb37df32a8acaec180004fa" exitCode=0 Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.216866 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerDied","Data":"5b8e123f417beb8f1342f088f0a56b39bef8e1f6fcb37df32a8acaec180004fa"} Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.216946 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerStarted","Data":"16fe314f6f622d08f860191a72db515e53b71e314766c345121346f1d1d1a3a3"} Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.218803 5055 generic.go:334] "Generic (PLEG): container finished" podID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerID="9a2f7aff67310f37076b5cf401a4814c36b030be42029f03b30a30fb5ec13988" exitCode=0 Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.218859 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerDied","Data":"9a2f7aff67310f37076b5cf401a4814c36b030be42029f03b30a30fb5ec13988"} Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.221085 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerID="60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0" exitCode=0 Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.221111 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerDied","Data":"60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0"} Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.221142 5055 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerStarted","Data":"27d2509bb8f02b544b9b38dc22e5e4baec6eafb1d5f99e97785231f9e67b3289"} Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.635579 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-k25qp"] Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.637058 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.639993 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.649594 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k25qp"] Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.744142 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5rcl\" (UniqueName: \"kubernetes.io/projected/1b333b58-deda-4c73-836d-a12c53f3adf2-kube-api-access-m5rcl\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.744221 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-utilities\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.744262 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-catalog-content\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.845950 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5rcl\" (UniqueName: \"kubernetes.io/projected/1b333b58-deda-4c73-836d-a12c53f3adf2-kube-api-access-m5rcl\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.846005 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-utilities\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.846052 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-catalog-content\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.847287 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-catalog-content\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.847364 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b333b58-deda-4c73-836d-a12c53f3adf2-utilities\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.864802 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5rcl\" (UniqueName: \"kubernetes.io/projected/1b333b58-deda-4c73-836d-a12c53f3adf2-kube-api-access-m5rcl\") pod \"redhat-marketplace-k25qp\" (UID: \"1b333b58-deda-4c73-836d-a12c53f3adf2\") " pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:31 crc kubenswrapper[5055]: I1011 07:07:31.962186 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.039305 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.041831 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.055528 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.151339 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.151655 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.151701 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6zwp\" (UniqueName: \"kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.192280 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-k25qp"] Oct 11 07:07:32 crc kubenswrapper[5055]: W1011 07:07:32.211058 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b333b58_deda_4c73_836d_a12c53f3adf2.slice/crio-f8debcb403f95e1ad87cee0698d15e22169347a267c61fd87be286ef11083e8e WatchSource:0}: Error finding container f8debcb403f95e1ad87cee0698d15e22169347a267c61fd87be286ef11083e8e: Status 
404 returned error can't find the container with id f8debcb403f95e1ad87cee0698d15e22169347a267c61fd87be286ef11083e8e Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.235862 5055 generic.go:334] "Generic (PLEG): container finished" podID="b4326943-ad56-4da8-b6fd-df9d1f5b5cf6" containerID="337835561fbb4cd113655e9eec43ce9f1b8b59ab44f3bcc92ba70a17b828fd4e" exitCode=0 Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.235933 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4m8t" event={"ID":"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6","Type":"ContainerDied","Data":"337835561fbb4cd113655e9eec43ce9f1b8b59ab44f3bcc92ba70a17b828fd4e"} Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.241612 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerStarted","Data":"06207fad51b61b3ca17502eea3c494a2a70e4dd9c63f8c77abf3ec9be1feedea"} Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.244220 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerStarted","Data":"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860"} Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.245379 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k25qp" event={"ID":"1b333b58-deda-4c73-836d-a12c53f3adf2","Type":"ContainerStarted","Data":"f8debcb403f95e1ad87cee0698d15e22169347a267c61fd87be286ef11083e8e"} Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.253216 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6zwp\" (UniqueName: \"kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.270994 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.271069 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.271495 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.271588 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6zwp\" (UniqueName: \"kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp\") pod \"redhat-marketplace-8jfkx\" (UID: 
\"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.272409 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities\") pod \"redhat-marketplace-8jfkx\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.296016 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6r9kk" podStartSLOduration=1.825449909 podStartE2EDuration="3.295997395s" podCreationTimestamp="2025-10-11 07:07:29 +0000 UTC" firstStartedPulling="2025-10-11 07:07:30.207315404 +0000 UTC m=+833.981589201" lastFinishedPulling="2025-10-11 07:07:31.67786288 +0000 UTC m=+835.452136687" observedRunningTime="2025-10-11 07:07:32.290858259 +0000 UTC m=+836.065132066" watchObservedRunningTime="2025-10-11 07:07:32.295997395 +0000 UTC m=+836.070271202" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.384703 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.640190 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pqw42"] Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.641828 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.645131 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.659676 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqw42"] Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.772337 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:32 crc kubenswrapper[5055]: W1011 07:07:32.773832 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e6b8e8f_6621_40a4_820e_f632b03bf39c.slice/crio-74628e3e14b435dc37b9f424d4a8f294b46dbb624828b780d483a60609452490 WatchSource:0}: Error finding container 74628e3e14b435dc37b9f424d4a8f294b46dbb624828b780d483a60609452490: Status 404 returned error can't find the container with id 74628e3e14b435dc37b9f424d4a8f294b46dbb624828b780d483a60609452490 Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.775854 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-catalog-content\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.775909 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scmcx\" (UniqueName: \"kubernetes.io/projected/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-kube-api-access-scmcx\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" 
Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.775933 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-utilities\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.876874 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-catalog-content\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.876947 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-utilities\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.876972 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scmcx\" (UniqueName: \"kubernetes.io/projected/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-kube-api-access-scmcx\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.877374 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-utilities\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.877443 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-catalog-content\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.896523 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scmcx\" (UniqueName: \"kubernetes.io/projected/aa53a9b5-e5f4-44e4-b48c-b4f517b959a9-kube-api-access-scmcx\") pod \"redhat-operators-pqw42\" (UID: \"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9\") " pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:32 crc kubenswrapper[5055]: I1011 07:07:32.961154 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.140504 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqw42"] Oct 11 07:07:33 crc kubenswrapper[5055]: W1011 07:07:33.149976 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa53a9b5_e5f4_44e4_b48c_b4f517b959a9.slice/crio-b235105dd967b5c44d54b62436c78e28e7ff9cdb710dc24cfc6347d6229d53a4 WatchSource:0}: Error finding container b235105dd967b5c44d54b62436c78e28e7ff9cdb710dc24cfc6347d6229d53a4: Status 404 returned error can't find the container with id b235105dd967b5c44d54b62436c78e28e7ff9cdb710dc24cfc6347d6229d53a4 Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.252702 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w4m8t" event={"ID":"b4326943-ad56-4da8-b6fd-df9d1f5b5cf6","Type":"ContainerStarted","Data":"9920c1e39a4dd5d1495241ffa7486b89fee0b13d0dae89085ccfa4881a593aae"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.253631 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw42" event={"ID":"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9","Type":"ContainerStarted","Data":"b235105dd967b5c44d54b62436c78e28e7ff9cdb710dc24cfc6347d6229d53a4"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.255298 5055 generic.go:334] "Generic (PLEG): container finished" podID="24acda22-b4e6-4e24-b340-4595ce200507" containerID="8b5e9c44a02be397843d499c531960ee93b242aaae9459a5df15b432fab14acd" exitCode=0 Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.255359 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerDied","Data":"8b5e9c44a02be397843d499c531960ee93b242aaae9459a5df15b432fab14acd"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.260490 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerID="72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860" exitCode=0 Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.260733 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerDied","Data":"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.262368 5055 generic.go:334] "Generic (PLEG): container finished" podID="1b333b58-deda-4c73-836d-a12c53f3adf2" containerID="764cec9f7452a8a400a0d456f6e691a36f817d93ce9a40caa1971591072cafd6" exitCode=0 Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.262411 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k25qp" event={"ID":"1b333b58-deda-4c73-836d-a12c53f3adf2","Type":"ContainerDied","Data":"764cec9f7452a8a400a0d456f6e691a36f817d93ce9a40caa1971591072cafd6"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.272594 5055 generic.go:334] "Generic (PLEG): container finished" podID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerID="1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3" exitCode=0 Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.272688 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerDied","Data":"1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.272715 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerStarted","Data":"74628e3e14b435dc37b9f424d4a8f294b46dbb624828b780d483a60609452490"} Oct 11 07:07:33 crc kubenswrapper[5055]: I1011 07:07:33.278465 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w4m8t" podStartSLOduration=2.897941284 podStartE2EDuration="5.278444334s" podCreationTimestamp="2025-10-11 07:07:28 +0000 UTC" firstStartedPulling="2025-10-11 07:07:30.212075539 +0000 UTC m=+833.986349346" lastFinishedPulling="2025-10-11 07:07:32.592578589 +0000 UTC m=+836.366852396" observedRunningTime="2025-10-11 07:07:33.269349866 +0000 UTC m=+837.043623673" watchObservedRunningTime="2025-10-11 07:07:33.278444334 +0000 UTC m=+837.052718141" Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.280664 5055 generic.go:334] "Generic (PLEG): container finished" podID="aa53a9b5-e5f4-44e4-b48c-b4f517b959a9" containerID="86440f16ac77d7f8879686941e5dc2b4e9340b89fc19425acfce1e3b7a5635d5" exitCode=0 Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.280806 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw42" event={"ID":"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9","Type":"ContainerDied","Data":"86440f16ac77d7f8879686941e5dc2b4e9340b89fc19425acfce1e3b7a5635d5"} Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.286488 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerStarted","Data":"d27e3d917919fc6c68033cbbda44871a16afae86753fd3791f85e9bd93b3692e"} Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.288975 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerStarted","Data":"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81"} Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.290956 5055 generic.go:334] "Generic (PLEG): container finished" podID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerID="fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16" exitCode=0 Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.291026 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerDied","Data":"fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16"} Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.315621 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c6r6g" podStartSLOduration=1.812088647 podStartE2EDuration="4.315606306s" podCreationTimestamp="2025-10-11 07:07:30 +0000 UTC" firstStartedPulling="2025-10-11 07:07:31.222143362 +0000 UTC m=+834.996417179" lastFinishedPulling="2025-10-11 07:07:33.725661021 +0000 UTC m=+837.499934838" observedRunningTime="2025-10-11 07:07:34.313823756 +0000 UTC m=+838.088097593" watchObservedRunningTime="2025-10-11 
07:07:34.315606306 +0000 UTC m=+838.089880113" Oct 11 07:07:34 crc kubenswrapper[5055]: I1011 07:07:34.328315 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vlcm5" podStartSLOduration=2.8014009140000002 podStartE2EDuration="5.328295866s" podCreationTimestamp="2025-10-11 07:07:29 +0000 UTC" firstStartedPulling="2025-10-11 07:07:31.218710615 +0000 UTC m=+834.992984432" lastFinishedPulling="2025-10-11 07:07:33.745605577 +0000 UTC m=+837.519879384" observedRunningTime="2025-10-11 07:07:34.326722652 +0000 UTC m=+838.100996459" watchObservedRunningTime="2025-10-11 07:07:34.328295866 +0000 UTC m=+838.102569673" Oct 11 07:07:35 crc kubenswrapper[5055]: I1011 07:07:35.301868 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw42" event={"ID":"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9","Type":"ContainerStarted","Data":"7ec5a79884c21eddf88ad1eb66fe31c7e3e0c1834c8e2a5eba870d52b0058619"} Oct 11 07:07:35 crc kubenswrapper[5055]: I1011 07:07:35.321005 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k25qp" event={"ID":"1b333b58-deda-4c73-836d-a12c53f3adf2","Type":"ContainerDied","Data":"fc5b3c7b4a8da4b537b2b248b68aba971ef2b033b9fd00059212a71c12789f17"} Oct 11 07:07:35 crc kubenswrapper[5055]: I1011 07:07:35.320971 5055 generic.go:334] "Generic (PLEG): container finished" podID="1b333b58-deda-4c73-836d-a12c53f3adf2" containerID="fc5b3c7b4a8da4b537b2b248b68aba971ef2b033b9fd00059212a71c12789f17" exitCode=0 Oct 11 07:07:35 crc kubenswrapper[5055]: I1011 07:07:35.325668 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerStarted","Data":"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7"} Oct 11 07:07:35 crc kubenswrapper[5055]: I1011 07:07:35.345723 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8jfkx" podStartSLOduration=1.951128346 podStartE2EDuration="3.345706338s" podCreationTimestamp="2025-10-11 07:07:32 +0000 UTC" firstStartedPulling="2025-10-11 07:07:33.273800432 +0000 UTC m=+837.048074239" lastFinishedPulling="2025-10-11 07:07:34.668378434 +0000 UTC m=+838.442652231" observedRunningTime="2025-10-11 07:07:35.342679792 +0000 UTC m=+839.116953599" watchObservedRunningTime="2025-10-11 07:07:35.345706338 +0000 UTC m=+839.119980145" Oct 11 07:07:36 crc kubenswrapper[5055]: I1011 07:07:36.331840 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-k25qp" event={"ID":"1b333b58-deda-4c73-836d-a12c53f3adf2","Type":"ContainerStarted","Data":"1af8fc6251b170c4c1b6f9e108f3406b610da3cafee83b33f0b7c7162f1afd23"} Oct 11 07:07:36 crc kubenswrapper[5055]: I1011 07:07:36.333395 5055 generic.go:334] "Generic (PLEG): container finished" podID="aa53a9b5-e5f4-44e4-b48c-b4f517b959a9" containerID="7ec5a79884c21eddf88ad1eb66fe31c7e3e0c1834c8e2a5eba870d52b0058619" exitCode=0 Oct 11 07:07:36 crc kubenswrapper[5055]: I1011 07:07:36.333439 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw42" event={"ID":"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9","Type":"ContainerDied","Data":"7ec5a79884c21eddf88ad1eb66fe31c7e3e0c1834c8e2a5eba870d52b0058619"} Oct 11 07:07:36 crc kubenswrapper[5055]: I1011 07:07:36.351911 5055 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-marketplace/redhat-marketplace-k25qp" podStartSLOduration=2.933543618 podStartE2EDuration="5.351892241s" podCreationTimestamp="2025-10-11 07:07:31 +0000 UTC" firstStartedPulling="2025-10-11 07:07:33.264692374 +0000 UTC m=+837.038966181" lastFinishedPulling="2025-10-11 07:07:35.683040997 +0000 UTC m=+839.457314804" observedRunningTime="2025-10-11 07:07:36.350016098 +0000 UTC m=+840.124289905" watchObservedRunningTime="2025-10-11 07:07:36.351892241 +0000 UTC m=+840.126166038" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.018742 5055 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","podf87c4acd-8aee-4f7c-b7b0-49b7b43d9cad"] err="unable to destroy cgroup paths for cgroup [kubepods burstable podf87c4acd-8aee-4f7c-b7b0-49b7b43d9cad] : Timed out while waiting for systemd to remove kubepods-burstable-podf87c4acd_8aee_4f7c_b7b0_49b7b43d9cad.slice" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.145750 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-k5sls" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.206648 5055 scope.go:117] "RemoveContainer" containerID="a387875d6d84f6bee6a3f27d2e82d22a879757e89df7b12b003953fdfa9251e2" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.225740 5055 scope.go:117] "RemoveContainer" containerID="78ef318c54e4e91ead301df5c98700d1f92387130cd5a72ec59642eec852d3f3" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.255150 5055 scope.go:117] "RemoveContainer" containerID="48f0c97ecd25437f6451d97cc35c2b3b3754bce21c67f204305dc89dc36a3c07" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.287620 5055 scope.go:117] "RemoveContainer" containerID="3c02583cfa1ad23f58a6e7cc23c4ddb2dd55a0c305c5c9539076186e7911e8a0" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.310535 5055 scope.go:117] "RemoveContainer" containerID="649eaa9ef8bd7b297db51d6ef876f638cb0f10efb41df3e6cb169ac227f7f849" Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.345987 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqw42" event={"ID":"aa53a9b5-e5f4-44e4-b48c-b4f517b959a9","Type":"ContainerStarted","Data":"34eee02091b89bbf501ccfed6f6c97bfbea79f180e17544d6c7cf5a03f5d691c"} Oct 11 07:07:37 crc kubenswrapper[5055]: I1011 07:07:37.362013 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pqw42" podStartSLOduration=2.914380022 podStartE2EDuration="5.361996395s" podCreationTimestamp="2025-10-11 07:07:32 +0000 UTC" firstStartedPulling="2025-10-11 07:07:34.28225565 +0000 UTC m=+838.056529457" lastFinishedPulling="2025-10-11 07:07:36.729872023 +0000 UTC m=+840.504145830" observedRunningTime="2025-10-11 07:07:37.360108151 +0000 UTC m=+841.134381958" watchObservedRunningTime="2025-10-11 07:07:37.361996395 +0000 UTC m=+841.136270202" Oct 11 07:07:38 crc kubenswrapper[5055]: I1011 07:07:38.352583 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-4lplf_c2f344f5-5570-4fb6-b59d-5b881cd1d2cc/kube-multus/2.log" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.163930 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.164196 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.224930 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.402841 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w4m8t" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.608621 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.608687 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:39 crc kubenswrapper[5055]: I1011 07:07:39.645249 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.155255 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.156010 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.188899 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.402196 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.406409 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.582163 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.582252 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:40 crc kubenswrapper[5055]: I1011 07:07:40.618065 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:41 crc kubenswrapper[5055]: I1011 07:07:41.428705 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:41 crc kubenswrapper[5055]: I1011 07:07:41.962724 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:41 crc kubenswrapper[5055]: I1011 07:07:41.963283 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.005301 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.385517 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.385739 5055 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.410324 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-k25qp" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.429792 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.430818 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.431070 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6r9kk" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="registry-server" containerID="cri-o://06207fad51b61b3ca17502eea3c494a2a70e4dd9c63f8c77abf3ec9be1feedea" gracePeriod=2 Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.961896 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:42 crc kubenswrapper[5055]: I1011 07:07:42.962866 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.002317 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.028310 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.382408 5055 generic.go:334] "Generic (PLEG): container finished" podID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerID="06207fad51b61b3ca17502eea3c494a2a70e4dd9c63f8c77abf3ec9be1feedea" exitCode=0 Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.382560 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerDied","Data":"06207fad51b61b3ca17502eea3c494a2a70e4dd9c63f8c77abf3ec9be1feedea"} Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.418465 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.430841 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pqw42" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.595564 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.715694 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities\") pod \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.715742 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g52hj\" (UniqueName: \"kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj\") pod \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.715812 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content\") pod \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\" (UID: \"8b84125d-bd97-477f-8a8f-8a7d29f787ff\") " Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.716602 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities" (OuterVolumeSpecName: "utilities") pod "8b84125d-bd97-477f-8a8f-8a7d29f787ff" (UID: "8b84125d-bd97-477f-8a8f-8a7d29f787ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.721567 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj" (OuterVolumeSpecName: "kube-api-access-g52hj") pod "8b84125d-bd97-477f-8a8f-8a7d29f787ff" (UID: "8b84125d-bd97-477f-8a8f-8a7d29f787ff"). InnerVolumeSpecName "kube-api-access-g52hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.761228 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b84125d-bd97-477f-8a8f-8a7d29f787ff" (UID: "8b84125d-bd97-477f-8a8f-8a7d29f787ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.817365 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.817692 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g52hj\" (UniqueName: \"kubernetes.io/projected/8b84125d-bd97-477f-8a8f-8a7d29f787ff-kube-api-access-g52hj\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:43 crc kubenswrapper[5055]: I1011 07:07:43.817715 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b84125d-bd97-477f-8a8f-8a7d29f787ff-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.389866 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6r9kk" Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.389891 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r9kk" event={"ID":"8b84125d-bd97-477f-8a8f-8a7d29f787ff","Type":"ContainerDied","Data":"58d78266391fe4fe76a962475704cef704c3dde3abc8e9eb6cc55621da5ecea8"} Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.389952 5055 scope.go:117] "RemoveContainer" containerID="06207fad51b61b3ca17502eea3c494a2a70e4dd9c63f8c77abf3ec9be1feedea" Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.389998 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c6r6g" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="registry-server" containerID="cri-o://29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81" gracePeriod=2 Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.409844 5055 scope.go:117] "RemoveContainer" containerID="9a2f7aff67310f37076b5cf401a4814c36b030be42029f03b30a30fb5ec13988" Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.420999 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.424057 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6r9kk"] Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.429090 5055 scope.go:117] "RemoveContainer" containerID="508adafe0afe9a414693aaeed1f87763f39c3731c1b4ac29559abdcc8203f5f4" Oct 11 07:07:44 crc kubenswrapper[5055]: I1011 07:07:44.829302 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.001834 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" path="/var/lib/kubelet/pods/8b84125d-bd97-477f-8a8f-8a7d29f787ff/volumes" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.238797 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.335906 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities\") pod \"e0900452-97df-4d66-beaa-81ab0c2feae7\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.335983 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content\") pod \"e0900452-97df-4d66-beaa-81ab0c2feae7\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.336023 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vccnm\" (UniqueName: \"kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm\") pod \"e0900452-97df-4d66-beaa-81ab0c2feae7\" (UID: \"e0900452-97df-4d66-beaa-81ab0c2feae7\") " Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.336926 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities" (OuterVolumeSpecName: "utilities") pod "e0900452-97df-4d66-beaa-81ab0c2feae7" (UID: "e0900452-97df-4d66-beaa-81ab0c2feae7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.341884 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm" (OuterVolumeSpecName: "kube-api-access-vccnm") pod "e0900452-97df-4d66-beaa-81ab0c2feae7" (UID: "e0900452-97df-4d66-beaa-81ab0c2feae7"). InnerVolumeSpecName "kube-api-access-vccnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.392668 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0900452-97df-4d66-beaa-81ab0c2feae7" (UID: "e0900452-97df-4d66-beaa-81ab0c2feae7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.396947 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerID="29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81" exitCode=0 Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.396983 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c6r6g" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.397008 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerDied","Data":"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81"} Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.397036 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c6r6g" event={"ID":"e0900452-97df-4d66-beaa-81ab0c2feae7","Type":"ContainerDied","Data":"27d2509bb8f02b544b9b38dc22e5e4baec6eafb1d5f99e97785231f9e67b3289"} Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.397053 5055 scope.go:117] "RemoveContainer" containerID="29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.415664 5055 scope.go:117] "RemoveContainer" containerID="72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.430722 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.439728 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.439791 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0900452-97df-4d66-beaa-81ab0c2feae7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.439805 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vccnm\" (UniqueName: \"kubernetes.io/projected/e0900452-97df-4d66-beaa-81ab0c2feae7-kube-api-access-vccnm\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.443155 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c6r6g"] Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.462740 5055 scope.go:117] "RemoveContainer" containerID="60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.478989 5055 scope.go:117] "RemoveContainer" containerID="29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81" Oct 11 07:07:45 crc kubenswrapper[5055]: E1011 07:07:45.479375 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81\": container with ID starting with 29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81 not found: ID does not exist" containerID="29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.479404 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81"} err="failed to get container status \"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81\": rpc error: code = NotFound desc = could not find container \"29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81\": container with ID starting 
with 29873b127c163a550e008929fa6f459f32d2bcd596e432dbed7174e7e4cc8e81 not found: ID does not exist" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.479438 5055 scope.go:117] "RemoveContainer" containerID="72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860" Oct 11 07:07:45 crc kubenswrapper[5055]: E1011 07:07:45.479635 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860\": container with ID starting with 72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860 not found: ID does not exist" containerID="72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.479653 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860"} err="failed to get container status \"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860\": rpc error: code = NotFound desc = could not find container \"72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860\": container with ID starting with 72f2c1dd28d033610c4d49ee5ad444d26036d7a14523f2c4c6b4dd2d1bd50860 not found: ID does not exist" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.479664 5055 scope.go:117] "RemoveContainer" containerID="60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0" Oct 11 07:07:45 crc kubenswrapper[5055]: E1011 07:07:45.479942 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0\": container with ID starting with 60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0 not found: ID does not exist" containerID="60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0" Oct 11 07:07:45 crc kubenswrapper[5055]: I1011 07:07:45.479961 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0"} err="failed to get container status \"60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0\": rpc error: code = NotFound desc = could not find container \"60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0\": container with ID starting with 60bfe034a736b4b268a51f646f85bfa4c11ec07738084cb3258a8244fa1455d0 not found: ID does not exist" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.405176 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8jfkx" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="registry-server" containerID="cri-o://d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7" gracePeriod=2 Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.794302 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.854151 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6zwp\" (UniqueName: \"kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp\") pod \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.854224 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities\") pod \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.854246 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content\") pod \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\" (UID: \"9e6b8e8f-6621-40a4-820e-f632b03bf39c\") " Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.855548 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities" (OuterVolumeSpecName: "utilities") pod "9e6b8e8f-6621-40a4-820e-f632b03bf39c" (UID: "9e6b8e8f-6621-40a4-820e-f632b03bf39c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.862914 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp" (OuterVolumeSpecName: "kube-api-access-m6zwp") pod "9e6b8e8f-6621-40a4-820e-f632b03bf39c" (UID: "9e6b8e8f-6621-40a4-820e-f632b03bf39c"). InnerVolumeSpecName "kube-api-access-m6zwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.868379 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e6b8e8f-6621-40a4-820e-f632b03bf39c" (UID: "9e6b8e8f-6621-40a4-820e-f632b03bf39c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.955268 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6zwp\" (UniqueName: \"kubernetes.io/projected/9e6b8e8f-6621-40a4-820e-f632b03bf39c-kube-api-access-m6zwp\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.955326 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:46 crc kubenswrapper[5055]: I1011 07:07:46.955336 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e6b8e8f-6621-40a4-820e-f632b03bf39c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:46.999970 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" path="/var/lib/kubelet/pods/e0900452-97df-4d66-beaa-81ab0c2feae7/volumes" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.411389 5055 generic.go:334] "Generic (PLEG): container finished" podID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerID="d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7" exitCode=0 Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.411479 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerDied","Data":"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7"} Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.411750 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8jfkx" event={"ID":"9e6b8e8f-6621-40a4-820e-f632b03bf39c","Type":"ContainerDied","Data":"74628e3e14b435dc37b9f424d4a8f294b46dbb624828b780d483a60609452490"} Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.411798 5055 scope.go:117] "RemoveContainer" containerID="d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.411495 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8jfkx" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.428467 5055 scope.go:117] "RemoveContainer" containerID="fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.432667 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.441247 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8jfkx"] Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.442795 5055 scope.go:117] "RemoveContainer" containerID="1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.460741 5055 scope.go:117] "RemoveContainer" containerID="d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7" Oct 11 07:07:47 crc kubenswrapper[5055]: E1011 07:07:47.461328 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7\": container with ID starting with d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7 not found: ID does not exist" containerID="d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.461379 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7"} err="failed to get container status \"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7\": rpc error: code = NotFound desc = could not find container \"d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7\": container with ID starting with d9568c0764d2a788e2e07657b82c924dafeb1341d1ac44df6aee5adfa5e874e7 not found: ID does not exist" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.461415 5055 scope.go:117] "RemoveContainer" containerID="fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16" Oct 11 07:07:47 crc kubenswrapper[5055]: E1011 07:07:47.461905 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16\": container with ID starting with fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16 not found: ID does not exist" containerID="fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.461966 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16"} err="failed to get container status \"fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16\": rpc error: code = NotFound desc = could not find container \"fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16\": container with ID starting with fcd7b511cd2ef48ec130ca56e7ecc89bbac7a42ca7c8ff34d6b7be4465859c16 not found: ID does not exist" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.461995 5055 scope.go:117] "RemoveContainer" containerID="1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3" Oct 11 07:07:47 crc kubenswrapper[5055]: E1011 07:07:47.462292 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3\": container with ID starting with 1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3 not found: ID does not exist" containerID="1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3" Oct 11 07:07:47 crc kubenswrapper[5055]: I1011 07:07:47.462325 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3"} err="failed to get container status \"1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3\": rpc error: code = NotFound desc = could not find container \"1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3\": container with ID starting with 1cf1a1240c6f91a3de89e5dd546851411c4ed6cd9b266a062b78c94bf13372c3 not found: ID does not exist" Oct 11 07:07:48 crc kubenswrapper[5055]: I1011 07:07:48.999300 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" path="/var/lib/kubelet/pods/9e6b8e8f-6621-40a4-820e-f632b03bf39c/volumes" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.888163 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85"] Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.888985 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889003 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889018 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889026 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889037 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889044 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889053 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889060 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889075 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889082 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889094 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889100 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889112 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889117 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889126 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889131 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="extract-content" Oct 11 07:08:01 crc kubenswrapper[5055]: E1011 07:08:01.889139 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889144 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="extract-utilities" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889254 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0900452-97df-4d66-beaa-81ab0c2feae7" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889264 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b84125d-bd97-477f-8a8f-8a7d29f787ff" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.889275 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e6b8e8f-6621-40a4-820e-f632b03bf39c" containerName="registry-server" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.890984 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.899749 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 11 07:08:01 crc kubenswrapper[5055]: I1011 07:08:01.908038 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85"] Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.047313 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttxpw\" (UniqueName: \"kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.047359 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.047396 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.148553 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttxpw\" (UniqueName: \"kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.149104 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.149204 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.149504 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.149575 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.167064 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttxpw\" (UniqueName: \"kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.258241 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:02 crc kubenswrapper[5055]: I1011 07:08:02.668614 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85"] Oct 11 07:08:03 crc kubenswrapper[5055]: I1011 07:08:03.492553 5055 generic.go:334] "Generic (PLEG): container finished" podID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerID="6d3ff8662c2e3d3edd76b147d55c99e55343a26a8f6f68662e598b45c3c7f9e3" exitCode=0 Oct 11 07:08:03 crc kubenswrapper[5055]: I1011 07:08:03.492596 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerDied","Data":"6d3ff8662c2e3d3edd76b147d55c99e55343a26a8f6f68662e598b45c3c7f9e3"} Oct 11 07:08:03 crc kubenswrapper[5055]: I1011 07:08:03.492625 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerStarted","Data":"3f55071e1d5a7e89b3ebcbacf1f35f2ec606e9d7453ba924edecdef2192925c6"} Oct 11 07:08:04 crc kubenswrapper[5055]: I1011 07:08:04.501041 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerStarted","Data":"a175521d6f071b0a1f6b53ca8a6c0e8d8bc06e46b62ca9489d16b7ec160aac78"} Oct 11 07:08:05 crc kubenswrapper[5055]: I1011 07:08:05.513585 5055 generic.go:334] "Generic (PLEG): container finished" podID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerID="a175521d6f071b0a1f6b53ca8a6c0e8d8bc06e46b62ca9489d16b7ec160aac78" exitCode=0 Oct 11 07:08:05 crc kubenswrapper[5055]: I1011 07:08:05.513661 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" 
event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerDied","Data":"a175521d6f071b0a1f6b53ca8a6c0e8d8bc06e46b62ca9489d16b7ec160aac78"} Oct 11 07:08:06 crc kubenswrapper[5055]: I1011 07:08:06.524876 5055 generic.go:334] "Generic (PLEG): container finished" podID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerID="85a5b602b5e976b16c686ba752a78e6f50703e8599524574df29bf342bddf297" exitCode=0 Oct 11 07:08:06 crc kubenswrapper[5055]: I1011 07:08:06.524960 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerDied","Data":"85a5b602b5e976b16c686ba752a78e6f50703e8599524574df29bf342bddf297"} Oct 11 07:08:07 crc kubenswrapper[5055]: I1011 07:08:07.862214 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.029118 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle\") pod \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.029206 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttxpw\" (UniqueName: \"kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw\") pod \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.029314 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util\") pod \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\" (UID: \"5d98516c-9175-49d7-aa13-c9dd8fb2ea57\") " Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.030281 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle" (OuterVolumeSpecName: "bundle") pod "5d98516c-9175-49d7-aa13-c9dd8fb2ea57" (UID: "5d98516c-9175-49d7-aa13-c9dd8fb2ea57"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.037715 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw" (OuterVolumeSpecName: "kube-api-access-ttxpw") pod "5d98516c-9175-49d7-aa13-c9dd8fb2ea57" (UID: "5d98516c-9175-49d7-aa13-c9dd8fb2ea57"). InnerVolumeSpecName "kube-api-access-ttxpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.066336 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util" (OuterVolumeSpecName: "util") pod "5d98516c-9175-49d7-aa13-c9dd8fb2ea57" (UID: "5d98516c-9175-49d7-aa13-c9dd8fb2ea57"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.130939 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttxpw\" (UniqueName: \"kubernetes.io/projected/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-kube-api-access-ttxpw\") on node \"crc\" DevicePath \"\"" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.130980 5055 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-util\") on node \"crc\" DevicePath \"\"" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.130994 5055 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5d98516c-9175-49d7-aa13-c9dd8fb2ea57-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.536309 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" event={"ID":"5d98516c-9175-49d7-aa13-c9dd8fb2ea57","Type":"ContainerDied","Data":"3f55071e1d5a7e89b3ebcbacf1f35f2ec606e9d7453ba924edecdef2192925c6"} Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.536574 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f55071e1d5a7e89b3ebcbacf1f35f2ec606e9d7453ba924edecdef2192925c6" Oct 11 07:08:08 crc kubenswrapper[5055]: I1011 07:08:08.536549 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.170880 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-p9blj"] Oct 11 07:08:11 crc kubenswrapper[5055]: E1011 07:08:11.171431 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="extract" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.171447 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="extract" Oct 11 07:08:11 crc kubenswrapper[5055]: E1011 07:08:11.171474 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="util" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.171482 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="util" Oct 11 07:08:11 crc kubenswrapper[5055]: E1011 07:08:11.171509 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="pull" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.171519 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="pull" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.171629 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d98516c-9175-49d7-aa13-c9dd8fb2ea57" containerName="extract" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.172076 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.174737 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.174971 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.174984 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-7x7cw" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.190562 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-p9blj"] Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.271151 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tph2h\" (UniqueName: \"kubernetes.io/projected/82160f7a-cf39-40a9-904f-4343de88371c-kube-api-access-tph2h\") pod \"nmstate-operator-858ddd8f98-p9blj\" (UID: \"82160f7a-cf39-40a9-904f-4343de88371c\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.372601 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tph2h\" (UniqueName: \"kubernetes.io/projected/82160f7a-cf39-40a9-904f-4343de88371c-kube-api-access-tph2h\") pod \"nmstate-operator-858ddd8f98-p9blj\" (UID: \"82160f7a-cf39-40a9-904f-4343de88371c\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.388618 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tph2h\" (UniqueName: \"kubernetes.io/projected/82160f7a-cf39-40a9-904f-4343de88371c-kube-api-access-tph2h\") pod \"nmstate-operator-858ddd8f98-p9blj\" (UID: \"82160f7a-cf39-40a9-904f-4343de88371c\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.489311 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" Oct 11 07:08:11 crc kubenswrapper[5055]: I1011 07:08:11.882029 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-p9blj"] Oct 11 07:08:12 crc kubenswrapper[5055]: I1011 07:08:12.558673 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" event={"ID":"82160f7a-cf39-40a9-904f-4343de88371c","Type":"ContainerStarted","Data":"cac693ca63d3d8fbc051a81a37b8b164aa5c53449f656609d1f0de8de97cfda0"} Oct 11 07:08:14 crc kubenswrapper[5055]: I1011 07:08:14.580314 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" event={"ID":"82160f7a-cf39-40a9-904f-4343de88371c","Type":"ContainerStarted","Data":"41824a5cc0fa469b746f916ef001fc30b27031ade2f50f04b371d32519815676"} Oct 11 07:08:14 crc kubenswrapper[5055]: I1011 07:08:14.622018 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-858ddd8f98-p9blj" podStartSLOduration=2.068786459 podStartE2EDuration="3.621987111s" podCreationTimestamp="2025-10-11 07:08:11 +0000 UTC" firstStartedPulling="2025-10-11 07:08:11.897972625 +0000 UTC m=+875.672246432" lastFinishedPulling="2025-10-11 07:08:13.451173257 +0000 UTC m=+877.225447084" observedRunningTime="2025-10-11 07:08:14.62038015 +0000 UTC m=+878.394653977" watchObservedRunningTime="2025-10-11 07:08:14.621987111 +0000 UTC m=+878.396260928" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.398849 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.400225 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.402692 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-tzbmg" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.406881 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.407652 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: W1011 07:08:22.410522 5055 reflector.go:561] object-"openshift-nmstate"/"openshift-nmstate-webhook": failed to list *v1.Secret: secrets "openshift-nmstate-webhook" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-nmstate": no relationship found between node 'crc' and this object Oct 11 07:08:22 crc kubenswrapper[5055]: E1011 07:08:22.410559 5055 reflector.go:158] "Unhandled Error" err="object-\"openshift-nmstate\"/\"openshift-nmstate-webhook\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-nmstate-webhook\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-nmstate\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.410590 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.424348 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-n4knm"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.425068 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.428367 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.525520 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.526451 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.528052 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.528941 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.531887 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-w7vmz" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535433 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vjt2\" (UniqueName: \"kubernetes.io/projected/a47eeb98-80a9-4ee6-8175-cf40a0e5a59d-kube-api-access-2vjt2\") pod \"nmstate-metrics-fdff9cb8d-kprdt\" (UID: \"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535500 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-dbus-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535518 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-ovs-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535533 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-nmstate-lock\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535550 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/47925cdb-bc01-447e-9d57-277acb520901-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535567 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zwgw\" (UniqueName: \"kubernetes.io/projected/20c5f869-3113-4877-8052-73033c2200ba-kube-api-access-2zwgw\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.535584 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj2c6\" (UniqueName: \"kubernetes.io/projected/47925cdb-bc01-447e-9d57-277acb520901-kube-api-access-tj2c6\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 
07:08:22.538072 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638703 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-dbus-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638754 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-ovs-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638774 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-nmstate-lock\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638806 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/47925cdb-bc01-447e-9d57-277acb520901-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638829 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zwgw\" (UniqueName: \"kubernetes.io/projected/20c5f869-3113-4877-8052-73033c2200ba-kube-api-access-2zwgw\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638874 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj2c6\" (UniqueName: \"kubernetes.io/projected/47925cdb-bc01-447e-9d57-277acb520901-kube-api-access-tj2c6\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638898 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8clj\" (UniqueName: \"kubernetes.io/projected/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-kube-api-access-j8clj\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638923 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638940 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.638974 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vjt2\" (UniqueName: \"kubernetes.io/projected/a47eeb98-80a9-4ee6-8175-cf40a0e5a59d-kube-api-access-2vjt2\") pod \"nmstate-metrics-fdff9cb8d-kprdt\" (UID: \"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.639082 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-dbus-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.639360 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-nmstate-lock\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.639405 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/20c5f869-3113-4877-8052-73033c2200ba-ovs-socket\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.672693 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vjt2\" (UniqueName: \"kubernetes.io/projected/a47eeb98-80a9-4ee6-8175-cf40a0e5a59d-kube-api-access-2vjt2\") pod \"nmstate-metrics-fdff9cb8d-kprdt\" (UID: \"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.673359 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zwgw\" (UniqueName: \"kubernetes.io/projected/20c5f869-3113-4877-8052-73033c2200ba-kube-api-access-2zwgw\") pod \"nmstate-handler-n4knm\" (UID: \"20c5f869-3113-4877-8052-73033c2200ba\") " pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.710479 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj2c6\" (UniqueName: \"kubernetes.io/projected/47925cdb-bc01-447e-9d57-277acb520901-kube-api-access-tj2c6\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.740022 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8clj\" (UniqueName: \"kubernetes.io/projected/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-kube-api-access-j8clj\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.740065 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.740092 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.740957 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.746354 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.763042 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.763668 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8clj\" (UniqueName: \"kubernetes.io/projected/b2155c2a-94c5-4dd0-8960-a40eda21c6b2-kube-api-access-j8clj\") pod \"nmstate-console-plugin-6b874cbd85-fp454\" (UID: \"b2155c2a-94c5-4dd0-8960-a40eda21c6b2\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.770130 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6fb7678fb5-hgnq2"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.770982 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.780051 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6fb7678fb5-hgnq2"] Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.791393 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:22 crc kubenswrapper[5055]: W1011 07:08:22.817083 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20c5f869_3113_4877_8052_73033c2200ba.slice/crio-7034580abf41939521b54b7760c6b317fe9f1c48528fd76c17a5c367a02fd9c6 WatchSource:0}: Error finding container 7034580abf41939521b54b7760c6b317fe9f1c48528fd76c17a5c367a02fd9c6: Status 404 returned error can't find the container with id 7034580abf41939521b54b7760c6b317fe9f1c48528fd76c17a5c367a02fd9c6 Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.852011 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.942954 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-oauth-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.942996 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-service-ca\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.943021 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-trusted-ca-bundle\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.943040 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.943164 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q67vk\" (UniqueName: \"kubernetes.io/projected/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-kube-api-access-q67vk\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.943230 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-oauth-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.943252 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:22 crc kubenswrapper[5055]: I1011 07:08:22.956896 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt"] Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.042461 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454"] Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046279 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-oauth-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046334 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046425 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-oauth-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046461 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-service-ca\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046489 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-trusted-ca-bundle\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046508 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.046537 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q67vk\" (UniqueName: \"kubernetes.io/projected/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-kube-api-access-q67vk\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.047370 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-service-ca\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.047707 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-trusted-ca-bundle\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.048339 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-oauth-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.048340 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.053136 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-serving-cert\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.054148 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-console-oauth-config\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.062609 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q67vk\" (UniqueName: \"kubernetes.io/projected/d2b0c38c-747d-4bd4-be1c-c090e926a3f0-kube-api-access-q67vk\") pod \"console-6fb7678fb5-hgnq2\" (UID: \"d2b0c38c-747d-4bd4-be1c-c090e926a3f0\") " pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.115101 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.276873 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6fb7678fb5-hgnq2"] Oct 11 07:08:23 crc kubenswrapper[5055]: W1011 07:08:23.284074 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2b0c38c_747d_4bd4_be1c_c090e926a3f0.slice/crio-dcc8140801951f58c67c88b7f4fee4c2b176b86488903826084020ffac7bd58d WatchSource:0}: Error finding container dcc8140801951f58c67c88b7f4fee4c2b176b86488903826084020ffac7bd58d: Status 404 returned error can't find the container with id dcc8140801951f58c67c88b7f4fee4c2b176b86488903826084020ffac7bd58d Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.451071 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.464116 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/47925cdb-bc01-447e-9d57-277acb520901-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-xh8hj\" (UID: \"47925cdb-bc01-447e-9d57-277acb520901\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.638590 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" event={"ID":"b2155c2a-94c5-4dd0-8960-a40eda21c6b2","Type":"ContainerStarted","Data":"968c0c92b2ebc3301e4b0a99eb7400b1eb8e0732def031ffa249003863875f3c"} Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.640959 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6fb7678fb5-hgnq2" event={"ID":"d2b0c38c-747d-4bd4-be1c-c090e926a3f0","Type":"ContainerStarted","Data":"f88d99a318a2e9e349e5dca52390420e5dd090853a73d58cacfa54a627b4ebad"} Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.641001 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6fb7678fb5-hgnq2" event={"ID":"d2b0c38c-747d-4bd4-be1c-c090e926a3f0","Type":"ContainerStarted","Data":"dcc8140801951f58c67c88b7f4fee4c2b176b86488903826084020ffac7bd58d"} Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.642604 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-n4knm" event={"ID":"20c5f869-3113-4877-8052-73033c2200ba","Type":"ContainerStarted","Data":"7034580abf41939521b54b7760c6b317fe9f1c48528fd76c17a5c367a02fd9c6"} Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.644182 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" event={"ID":"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d","Type":"ContainerStarted","Data":"53e41051733af35bf31d508a6ad963546a7606fa87e73b6f090a16d3c800ee4f"} Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.663372 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6fb7678fb5-hgnq2" podStartSLOduration=1.6633553349999999 podStartE2EDuration="1.663355335s" podCreationTimestamp="2025-10-11 07:08:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:08:23.65963836 +0000 UTC m=+887.433912177" watchObservedRunningTime="2025-10-11 07:08:23.663355335 +0000 UTC m=+887.437629142" Oct 11 
07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.682599 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:23 crc kubenswrapper[5055]: I1011 07:08:23.870890 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj"] Oct 11 07:08:23 crc kubenswrapper[5055]: W1011 07:08:23.875894 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47925cdb_bc01_447e_9d57_277acb520901.slice/crio-48701eac4ab60ba15b9457772ec0fdce6c319a284349a9967ef5d32b13eec060 WatchSource:0}: Error finding container 48701eac4ab60ba15b9457772ec0fdce6c319a284349a9967ef5d32b13eec060: Status 404 returned error can't find the container with id 48701eac4ab60ba15b9457772ec0fdce6c319a284349a9967ef5d32b13eec060 Oct 11 07:08:24 crc kubenswrapper[5055]: I1011 07:08:24.651034 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" event={"ID":"47925cdb-bc01-447e-9d57-277acb520901","Type":"ContainerStarted","Data":"48701eac4ab60ba15b9457772ec0fdce6c319a284349a9967ef5d32b13eec060"} Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.666870 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" event={"ID":"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d","Type":"ContainerStarted","Data":"ed6de50b29d98b5a444d46b2c5389a4a8c2d27f75c356c6dee0451e70734345d"} Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.668924 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" event={"ID":"b2155c2a-94c5-4dd0-8960-a40eda21c6b2","Type":"ContainerStarted","Data":"dc63e08d69c61b04f57898b4419e01506ee69763c2d78018935ba3bbc62f2669"} Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.671265 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" event={"ID":"47925cdb-bc01-447e-9d57-277acb520901","Type":"ContainerStarted","Data":"1680642c9a755751e7bb67e9b35869966c17ae24ac818a7b2c55e9d7112bb375"} Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.671328 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.673617 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-n4knm" event={"ID":"20c5f869-3113-4877-8052-73033c2200ba","Type":"ContainerStarted","Data":"f72441bc74f7baf839196b5e3b5bbb4e9e313a30ca2e831642c38bf66954c3be"} Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.673784 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.682756 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-fp454" podStartSLOduration=2.209345165 podStartE2EDuration="4.682741587s" podCreationTimestamp="2025-10-11 07:08:22 +0000 UTC" firstStartedPulling="2025-10-11 07:08:23.057758085 +0000 UTC m=+886.832031892" lastFinishedPulling="2025-10-11 07:08:25.531154507 +0000 UTC m=+889.305428314" observedRunningTime="2025-10-11 07:08:26.682268345 +0000 UTC m=+890.456542152" watchObservedRunningTime="2025-10-11 07:08:26.682741587 +0000 UTC m=+890.457015394" Oct 11 
07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.702956 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" podStartSLOduration=3.026482177 podStartE2EDuration="4.702937535s" podCreationTimestamp="2025-10-11 07:08:22 +0000 UTC" firstStartedPulling="2025-10-11 07:08:23.878213033 +0000 UTC m=+887.652486840" lastFinishedPulling="2025-10-11 07:08:25.554668391 +0000 UTC m=+889.328942198" observedRunningTime="2025-10-11 07:08:26.702479374 +0000 UTC m=+890.476753181" watchObservedRunningTime="2025-10-11 07:08:26.702937535 +0000 UTC m=+890.477211342" Oct 11 07:08:26 crc kubenswrapper[5055]: I1011 07:08:26.723902 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-n4knm" podStartSLOduration=1.9890683089999999 podStartE2EDuration="4.723878183s" podCreationTimestamp="2025-10-11 07:08:22 +0000 UTC" firstStartedPulling="2025-10-11 07:08:22.81882065 +0000 UTC m=+886.593094457" lastFinishedPulling="2025-10-11 07:08:25.553630514 +0000 UTC m=+889.327904331" observedRunningTime="2025-10-11 07:08:26.719244844 +0000 UTC m=+890.493518661" watchObservedRunningTime="2025-10-11 07:08:26.723878183 +0000 UTC m=+890.498152000" Oct 11 07:08:28 crc kubenswrapper[5055]: I1011 07:08:28.686500 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" event={"ID":"a47eeb98-80a9-4ee6-8175-cf40a0e5a59d","Type":"ContainerStarted","Data":"65b0acbb743bedee4e9bfd617c2090028521670df2cd433faaa4a3399478e293"} Oct 11 07:08:28 crc kubenswrapper[5055]: I1011 07:08:28.702742 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-kprdt" podStartSLOduration=1.986904005 podStartE2EDuration="6.702715986s" podCreationTimestamp="2025-10-11 07:08:22 +0000 UTC" firstStartedPulling="2025-10-11 07:08:22.966740289 +0000 UTC m=+886.741014096" lastFinishedPulling="2025-10-11 07:08:27.68255227 +0000 UTC m=+891.456826077" observedRunningTime="2025-10-11 07:08:28.701344751 +0000 UTC m=+892.475618558" watchObservedRunningTime="2025-10-11 07:08:28.702715986 +0000 UTC m=+892.476989813" Oct 11 07:08:32 crc kubenswrapper[5055]: I1011 07:08:32.422180 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:08:32 crc kubenswrapper[5055]: I1011 07:08:32.422244 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:08:32 crc kubenswrapper[5055]: I1011 07:08:32.815997 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-n4knm" Oct 11 07:08:33 crc kubenswrapper[5055]: I1011 07:08:33.115280 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:33 crc kubenswrapper[5055]: I1011 07:08:33.115357 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:33 crc kubenswrapper[5055]: 
I1011 07:08:33.119506 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:33 crc kubenswrapper[5055]: I1011 07:08:33.720604 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-6fb7678fb5-hgnq2" Oct 11 07:08:33 crc kubenswrapper[5055]: I1011 07:08:33.767893 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 07:08:43 crc kubenswrapper[5055]: I1011 07:08:43.687532 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-xh8hj" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.007899 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq"] Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.009594 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.011433 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.018087 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq"] Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.073309 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.073364 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.073397 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkg5w\" (UniqueName: \"kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.174863 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.175162 5055 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.175309 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkg5w\" (UniqueName: \"kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.175366 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.175830 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.201051 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkg5w\" (UniqueName: \"kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.333168 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.527092 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq"] Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.830083 5055 generic.go:334] "Generic (PLEG): container finished" podID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerID="589ff3e0a3387c3ff4da3a3b73e51476f40b027641c6f8eee558a5a32dff2aca" exitCode=0 Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.830144 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" event={"ID":"04210186-75b3-4eaa-9eb6-f4b5c41af23a","Type":"ContainerDied","Data":"589ff3e0a3387c3ff4da3a3b73e51476f40b027641c6f8eee558a5a32dff2aca"} Oct 11 07:08:56 crc kubenswrapper[5055]: I1011 07:08:56.830426 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" event={"ID":"04210186-75b3-4eaa-9eb6-f4b5c41af23a","Type":"ContainerStarted","Data":"c9e9b6394931dd946d5d3b47de2efbb304c7523a9ec2cd656aff555d00994087"} Oct 11 07:08:58 crc kubenswrapper[5055]: I1011 07:08:58.804261 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-gjjf2" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" containerID="cri-o://d1caef781fbca211e47cdf81c67e07666f0b9955b9a4f0e629b9ccb273244636" gracePeriod=15 Oct 11 07:08:58 crc kubenswrapper[5055]: I1011 07:08:58.816305 5055 patch_prober.go:28] interesting pod/console-f9d7485db-gjjf2 container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body= Oct 11 07:08:58 crc kubenswrapper[5055]: I1011 07:08:58.816369 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-f9d7485db-gjjf2" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.42:8443/health\": dial tcp 10.217.0.42:8443: connect: connection refused" Oct 11 07:08:58 crc kubenswrapper[5055]: I1011 07:08:58.844479 5055 generic.go:334] "Generic (PLEG): container finished" podID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerID="4ccc5cd186c7904aad74d9e1534a065605408015f0611ec9ee473e25484a7120" exitCode=0 Oct 11 07:08:58 crc kubenswrapper[5055]: I1011 07:08:58.844531 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" event={"ID":"04210186-75b3-4eaa-9eb6-f4b5c41af23a","Type":"ContainerDied","Data":"4ccc5cd186c7904aad74d9e1534a065605408015f0611ec9ee473e25484a7120"} Oct 11 07:08:59 crc kubenswrapper[5055]: I1011 07:08:59.851624 5055 generic.go:334] "Generic (PLEG): container finished" podID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerID="8e081b2403fc54dc6a3f5caab7784e12e610924f83884c0ab6870decda6c3e83" exitCode=0 Oct 11 07:08:59 crc kubenswrapper[5055]: I1011 07:08:59.851809 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" 
event={"ID":"04210186-75b3-4eaa-9eb6-f4b5c41af23a","Type":"ContainerDied","Data":"8e081b2403fc54dc6a3f5caab7784e12e610924f83884c0ab6870decda6c3e83"} Oct 11 07:08:59 crc kubenswrapper[5055]: I1011 07:08:59.853963 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gjjf2_bbad9e64-0bb0-4acc-9bff-63234abcf93c/console/0.log" Oct 11 07:08:59 crc kubenswrapper[5055]: I1011 07:08:59.854006 5055 generic.go:334] "Generic (PLEG): container finished" podID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerID="d1caef781fbca211e47cdf81c67e07666f0b9955b9a4f0e629b9ccb273244636" exitCode=2 Oct 11 07:08:59 crc kubenswrapper[5055]: I1011 07:08:59.854036 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gjjf2" event={"ID":"bbad9e64-0bb0-4acc-9bff-63234abcf93c","Type":"ContainerDied","Data":"d1caef781fbca211e47cdf81c67e07666f0b9955b9a4f0e629b9ccb273244636"} Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.080020 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gjjf2_bbad9e64-0bb0-4acc-9bff-63234abcf93c/console/0.log" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.080082 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129607 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129650 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129686 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129732 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129786 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.129829 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92rhq\" (UniqueName: \"kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: 
I1011 07:09:00.129856 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle\") pod \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\" (UID: \"bbad9e64-0bb0-4acc-9bff-63234abcf93c\") " Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.130680 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.130669 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca" (OuterVolumeSpecName: "service-ca") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.130690 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.130721 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config" (OuterVolumeSpecName: "console-config") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.135222 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq" (OuterVolumeSpecName: "kube-api-access-92rhq") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "kube-api-access-92rhq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.135263 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.135632 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "bbad9e64-0bb0-4acc-9bff-63234abcf93c" (UID: "bbad9e64-0bb0-4acc-9bff-63234abcf93c"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231729 5055 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231828 5055 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231847 5055 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231862 5055 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bbad9e64-0bb0-4acc-9bff-63234abcf93c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231876 5055 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-service-ca\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231888 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92rhq\" (UniqueName: \"kubernetes.io/projected/bbad9e64-0bb0-4acc-9bff-63234abcf93c-kube-api-access-92rhq\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.231909 5055 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bbad9e64-0bb0-4acc-9bff-63234abcf93c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.862268 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-gjjf2_bbad9e64-0bb0-4acc-9bff-63234abcf93c/console/0.log" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.862404 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-gjjf2" event={"ID":"bbad9e64-0bb0-4acc-9bff-63234abcf93c","Type":"ContainerDied","Data":"8adaf00efe061fcb53c6a2a8e97b18c2335f07215f469df43dc42bcd943a0d3c"} Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.862460 5055 scope.go:117] "RemoveContainer" containerID="d1caef781fbca211e47cdf81c67e07666f0b9955b9a4f0e629b9ccb273244636" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.862550 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-gjjf2" Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.896750 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 07:09:00 crc kubenswrapper[5055]: I1011 07:09:00.901486 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-gjjf2"] Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.007083 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" path="/var/lib/kubelet/pods/bbad9e64-0bb0-4acc-9bff-63234abcf93c/volumes" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.132914 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.143956 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle\") pod \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.144046 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util\") pod \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.144111 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkg5w\" (UniqueName: \"kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w\") pod \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\" (UID: \"04210186-75b3-4eaa-9eb6-f4b5c41af23a\") " Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.145784 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle" (OuterVolumeSpecName: "bundle") pod "04210186-75b3-4eaa-9eb6-f4b5c41af23a" (UID: "04210186-75b3-4eaa-9eb6-f4b5c41af23a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.166408 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w" (OuterVolumeSpecName: "kube-api-access-pkg5w") pod "04210186-75b3-4eaa-9eb6-f4b5c41af23a" (UID: "04210186-75b3-4eaa-9eb6-f4b5c41af23a"). InnerVolumeSpecName "kube-api-access-pkg5w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.176885 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util" (OuterVolumeSpecName: "util") pod "04210186-75b3-4eaa-9eb6-f4b5c41af23a" (UID: "04210186-75b3-4eaa-9eb6-f4b5c41af23a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.246033 5055 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.246082 5055 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04210186-75b3-4eaa-9eb6-f4b5c41af23a-util\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.246102 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkg5w\" (UniqueName: \"kubernetes.io/projected/04210186-75b3-4eaa-9eb6-f4b5c41af23a-kube-api-access-pkg5w\") on node \"crc\" DevicePath \"\"" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.869225 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.869251 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq" event={"ID":"04210186-75b3-4eaa-9eb6-f4b5c41af23a","Type":"ContainerDied","Data":"c9e9b6394931dd946d5d3b47de2efbb304c7523a9ec2cd656aff555d00994087"} Oct 11 07:09:01 crc kubenswrapper[5055]: I1011 07:09:01.869313 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9e9b6394931dd946d5d3b47de2efbb304c7523a9ec2cd656aff555d00994087" Oct 11 07:09:02 crc kubenswrapper[5055]: I1011 07:09:02.422750 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:09:02 crc kubenswrapper[5055]: I1011 07:09:02.422849 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.338425 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd"] Oct 11 07:09:10 crc kubenswrapper[5055]: E1011 07:09:10.339496 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="extract" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339513 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="extract" Oct 11 07:09:10 crc kubenswrapper[5055]: E1011 07:09:10.339527 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="util" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339535 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="util" Oct 11 07:09:10 crc kubenswrapper[5055]: E1011 07:09:10.339557 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" Oct 11 
07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339565 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" Oct 11 07:09:10 crc kubenswrapper[5055]: E1011 07:09:10.339583 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="pull" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339591 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="pull" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339711 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbad9e64-0bb0-4acc-9bff-63234abcf93c" containerName="console" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.339730 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="04210186-75b3-4eaa-9eb6-f4b5c41af23a" containerName="extract" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.340255 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.346268 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.347149 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.347424 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.347586 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.347912 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-fpw9v" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.367971 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd"] Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.454815 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-webhook-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.454940 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4fpc\" (UniqueName: \"kubernetes.io/projected/d3f5f61f-8260-48f5-8dd7-7c139cbad658-kube-api-access-s4fpc\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.454987 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-apiservice-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: 
\"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.556522 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4fpc\" (UniqueName: \"kubernetes.io/projected/d3f5f61f-8260-48f5-8dd7-7c139cbad658-kube-api-access-s4fpc\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.556589 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-apiservice-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.556637 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-webhook-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.565779 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-webhook-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.565831 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d3f5f61f-8260-48f5-8dd7-7c139cbad658-apiservice-cert\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.571695 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4fpc\" (UniqueName: \"kubernetes.io/projected/d3f5f61f-8260-48f5-8dd7-7c139cbad658-kube-api-access-s4fpc\") pod \"metallb-operator-controller-manager-8479555c6b-xzdfd\" (UID: \"d3f5f61f-8260-48f5-8dd7-7c139cbad658\") " pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.652780 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv"] Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.653438 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.656034 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.656216 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.659116 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.659518 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-webhook-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.659577 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-apiservice-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.659594 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6zd4\" (UniqueName: \"kubernetes.io/projected/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-kube-api-access-x6zd4\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.667334 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv"] Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.669138 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-bsn4p" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.760304 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-webhook-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.760638 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-apiservice-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.760659 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6zd4\" (UniqueName: \"kubernetes.io/projected/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-kube-api-access-x6zd4\") pod 
\"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.765386 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-webhook-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.765468 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-apiservice-cert\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.781503 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6zd4\" (UniqueName: \"kubernetes.io/projected/efefbb28-ed08-46fe-ac6b-e819db7e3b0b-kube-api-access-x6zd4\") pod \"metallb-operator-webhook-server-6bdb447db-9zwvv\" (UID: \"efefbb28-ed08-46fe-ac6b-e819db7e3b0b\") " pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.915084 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd"] Oct 11 07:09:10 crc kubenswrapper[5055]: W1011 07:09:10.921221 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd3f5f61f_8260_48f5_8dd7_7c139cbad658.slice/crio-31a7e13917561e8679ca901884c9e42dc4811f901f4364423f80e0f5321db8af WatchSource:0}: Error finding container 31a7e13917561e8679ca901884c9e42dc4811f901f4364423f80e0f5321db8af: Status 404 returned error can't find the container with id 31a7e13917561e8679ca901884c9e42dc4811f901f4364423f80e0f5321db8af Oct 11 07:09:10 crc kubenswrapper[5055]: I1011 07:09:10.969223 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:11 crc kubenswrapper[5055]: I1011 07:09:11.244804 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv"] Oct 11 07:09:11 crc kubenswrapper[5055]: W1011 07:09:11.259209 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podefefbb28_ed08_46fe_ac6b_e819db7e3b0b.slice/crio-589448f48b29f966d7c5ecc0e431c878087b36894ac5f061763463142b3d3035 WatchSource:0}: Error finding container 589448f48b29f966d7c5ecc0e431c878087b36894ac5f061763463142b3d3035: Status 404 returned error can't find the container with id 589448f48b29f966d7c5ecc0e431c878087b36894ac5f061763463142b3d3035 Oct 11 07:09:11 crc kubenswrapper[5055]: I1011 07:09:11.918451 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" event={"ID":"d3f5f61f-8260-48f5-8dd7-7c139cbad658","Type":"ContainerStarted","Data":"31a7e13917561e8679ca901884c9e42dc4811f901f4364423f80e0f5321db8af"} Oct 11 07:09:11 crc kubenswrapper[5055]: I1011 07:09:11.919640 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" event={"ID":"efefbb28-ed08-46fe-ac6b-e819db7e3b0b","Type":"ContainerStarted","Data":"589448f48b29f966d7c5ecc0e431c878087b36894ac5f061763463142b3d3035"} Oct 11 07:09:13 crc kubenswrapper[5055]: I1011 07:09:13.931636 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" event={"ID":"d3f5f61f-8260-48f5-8dd7-7c139cbad658","Type":"ContainerStarted","Data":"a9295578a4efe3333b58fd09122a1be9b91d466e99facd7afa840c022a0a02fe"} Oct 11 07:09:13 crc kubenswrapper[5055]: I1011 07:09:13.932236 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:13 crc kubenswrapper[5055]: I1011 07:09:13.954177 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" podStartSLOduration=1.344878149 podStartE2EDuration="3.954160017s" podCreationTimestamp="2025-10-11 07:09:10 +0000 UTC" firstStartedPulling="2025-10-11 07:09:10.923518798 +0000 UTC m=+934.697792605" lastFinishedPulling="2025-10-11 07:09:13.532800666 +0000 UTC m=+937.307074473" observedRunningTime="2025-10-11 07:09:13.949912808 +0000 UTC m=+937.724186615" watchObservedRunningTime="2025-10-11 07:09:13.954160017 +0000 UTC m=+937.728433814" Oct 11 07:09:15 crc kubenswrapper[5055]: I1011 07:09:15.941307 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" event={"ID":"efefbb28-ed08-46fe-ac6b-e819db7e3b0b","Type":"ContainerStarted","Data":"f5879d24c1f263beb0cf1e6e421a018b43ff95c91f528e4eebd7f3f26f0dca1c"} Oct 11 07:09:15 crc kubenswrapper[5055]: I1011 07:09:15.941447 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:15 crc kubenswrapper[5055]: I1011 07:09:15.958497 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" podStartSLOduration=2.004357653 podStartE2EDuration="5.958478855s" podCreationTimestamp="2025-10-11 
07:09:10 +0000 UTC" firstStartedPulling="2025-10-11 07:09:11.262658629 +0000 UTC m=+935.036932436" lastFinishedPulling="2025-10-11 07:09:15.216779831 +0000 UTC m=+938.991053638" observedRunningTime="2025-10-11 07:09:15.957355044 +0000 UTC m=+939.731628861" watchObservedRunningTime="2025-10-11 07:09:15.958478855 +0000 UTC m=+939.732752672" Oct 11 07:09:30 crc kubenswrapper[5055]: I1011 07:09:30.973506 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6bdb447db-9zwvv" Oct 11 07:09:32 crc kubenswrapper[5055]: I1011 07:09:32.422213 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:09:32 crc kubenswrapper[5055]: I1011 07:09:32.422519 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:09:32 crc kubenswrapper[5055]: I1011 07:09:32.422727 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:09:32 crc kubenswrapper[5055]: I1011 07:09:32.423231 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:09:32 crc kubenswrapper[5055]: I1011 07:09:32.423272 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586" gracePeriod=600 Oct 11 07:09:33 crc kubenswrapper[5055]: I1011 07:09:33.049102 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586" exitCode=0 Oct 11 07:09:33 crc kubenswrapper[5055]: I1011 07:09:33.049173 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586"} Oct 11 07:09:33 crc kubenswrapper[5055]: I1011 07:09:33.049585 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58"} Oct 11 07:09:33 crc kubenswrapper[5055]: I1011 07:09:33.049607 5055 scope.go:117] "RemoveContainer" containerID="f20ff53f9730b9dbe8d7b3de68e2425dea7f2c46dfb8cf2611b1fe493a8836b0" Oct 11 07:09:50 crc kubenswrapper[5055]: I1011 07:09:50.661876 5055 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-8479555c6b-xzdfd" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.510170 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.511439 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.513367 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.514117 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-drvfr" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.527780 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.534699 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-5fz6w"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.537136 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.539798 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.540020 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.597305 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-tlblh"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.598181 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.600304 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.600305 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.600547 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.603527 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-bq7h5" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.608219 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-68d546b9d8-r94c9"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.609098 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.611313 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.631584 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-r94c9"] Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.668868 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.668935 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.668975 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-startup\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.669090 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-reloader\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.669159 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-conf\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.669186 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics-certs\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.669246 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zk6v\" (UniqueName: \"kubernetes.io/projected/4d7343fd-2840-432d-aa04-4a8330fa4b09-kube-api-access-9zk6v\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.669313 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58czj\" (UniqueName: \"kubernetes.io/projected/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-kube-api-access-58czj\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc 
kubenswrapper[5055]: I1011 07:09:51.669389 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-sockets\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771152 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zk6v\" (UniqueName: \"kubernetes.io/projected/4d7343fd-2840-432d-aa04-4a8330fa4b09-kube-api-access-9zk6v\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771239 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58czj\" (UniqueName: \"kubernetes.io/projected/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-kube-api-access-58czj\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771263 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-cert\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771296 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/40eb46a9-72ab-4076-a44a-e76267ebb92c-metallb-excludel2\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771312 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-metrics-certs\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771331 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-sockets\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771349 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd76m\" (UniqueName: \"kubernetes.io/projected/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-kube-api-access-jd76m\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xq5z\" (UniqueName: \"kubernetes.io/projected/40eb46a9-72ab-4076-a44a-e76267ebb92c-kube-api-access-4xq5z\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc 
kubenswrapper[5055]: I1011 07:09:51.771385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771407 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771428 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771457 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-startup\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771481 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-reloader\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771498 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-conf\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771511 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics-certs\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.771530 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-metrics-certs\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.772341 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-sockets\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.772535 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " 
pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.773577 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-reloader\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.773680 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-conf\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.773807 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/4d7343fd-2840-432d-aa04-4a8330fa4b09-frr-startup\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.778155 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-cert\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.778213 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4d7343fd-2840-432d-aa04-4a8330fa4b09-metrics-certs\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.791658 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zk6v\" (UniqueName: \"kubernetes.io/projected/4d7343fd-2840-432d-aa04-4a8330fa4b09-kube-api-access-9zk6v\") pod \"frr-k8s-5fz6w\" (UID: \"4d7343fd-2840-432d-aa04-4a8330fa4b09\") " pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.793655 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58czj\" (UniqueName: \"kubernetes.io/projected/10b6afa5-8fa5-4580-8da4-1ba5ad7adca1-kube-api-access-58czj\") pod \"frr-k8s-webhook-server-64bf5d555-p2ns9\" (UID: \"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.834429 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.852161 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873136 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xq5z\" (UniqueName: \"kubernetes.io/projected/40eb46a9-72ab-4076-a44a-e76267ebb92c-kube-api-access-4xq5z\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873203 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873266 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-metrics-certs\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873306 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-cert\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873353 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/40eb46a9-72ab-4076-a44a-e76267ebb92c-metallb-excludel2\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873378 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-metrics-certs\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.873400 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd76m\" (UniqueName: \"kubernetes.io/projected/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-kube-api-access-jd76m\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: E1011 07:09:51.873399 5055 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 11 07:09:51 crc kubenswrapper[5055]: E1011 07:09:51.873485 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist podName:40eb46a9-72ab-4076-a44a-e76267ebb92c nodeName:}" failed. No retries permitted until 2025-10-11 07:09:52.37346506 +0000 UTC m=+976.147738867 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist") pod "speaker-tlblh" (UID: "40eb46a9-72ab-4076-a44a-e76267ebb92c") : secret "metallb-memberlist" not found Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.874156 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/40eb46a9-72ab-4076-a44a-e76267ebb92c-metallb-excludel2\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.876025 5055 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.878481 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-metrics-certs\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.880438 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-metrics-certs\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.886903 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-cert\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.897452 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xq5z\" (UniqueName: \"kubernetes.io/projected/40eb46a9-72ab-4076-a44a-e76267ebb92c-kube-api-access-4xq5z\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.897436 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd76m\" (UniqueName: \"kubernetes.io/projected/89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7-kube-api-access-jd76m\") pod \"controller-68d546b9d8-r94c9\" (UID: \"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7\") " pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:51 crc kubenswrapper[5055]: I1011 07:09:51.928867 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.041584 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9"] Oct 11 07:09:52 crc kubenswrapper[5055]: W1011 07:09:52.051870 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10b6afa5_8fa5_4580_8da4_1ba5ad7adca1.slice/crio-9dd3fbe7fbaac6cfe451bce3ade0ddef0adaf4bf9177032b08688f0a0d2b8e6f WatchSource:0}: Error finding container 9dd3fbe7fbaac6cfe451bce3ade0ddef0adaf4bf9177032b08688f0a0d2b8e6f: Status 404 returned error can't find the container with id 9dd3fbe7fbaac6cfe451bce3ade0ddef0adaf4bf9177032b08688f0a0d2b8e6f Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.123747 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-r94c9"] Oct 11 07:09:52 crc kubenswrapper[5055]: W1011 07:09:52.128677 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89f1b37c_5214_4b4d_b2a6_3543ef7bd2e7.slice/crio-0b30924d52735432615e399d0b90bb78e0dd666ed202f349c13cf1eae05091ae WatchSource:0}: Error finding container 0b30924d52735432615e399d0b90bb78e0dd666ed202f349c13cf1eae05091ae: Status 404 returned error can't find the container with id 0b30924d52735432615e399d0b90bb78e0dd666ed202f349c13cf1eae05091ae Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.156995 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-r94c9" event={"ID":"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7","Type":"ContainerStarted","Data":"0b30924d52735432615e399d0b90bb78e0dd666ed202f349c13cf1eae05091ae"} Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.158198 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" event={"ID":"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1","Type":"ContainerStarted","Data":"9dd3fbe7fbaac6cfe451bce3ade0ddef0adaf4bf9177032b08688f0a0d2b8e6f"} Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.159250 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"1e36394fab3250646e690d9bbcbade960d4e80194f0e8c74718310fb8d6c6c92"} Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.379333 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.384129 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/40eb46a9-72ab-4076-a44a-e76267ebb92c-memberlist\") pod \"speaker-tlblh\" (UID: \"40eb46a9-72ab-4076-a44a-e76267ebb92c\") " pod="metallb-system/speaker-tlblh" Oct 11 07:09:52 crc kubenswrapper[5055]: I1011 07:09:52.518921 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-tlblh" Oct 11 07:09:52 crc kubenswrapper[5055]: W1011 07:09:52.536411 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40eb46a9_72ab_4076_a44a_e76267ebb92c.slice/crio-17057a8fbd98a5eb67754690e2f325233f1ac7112c7b922aa3d62ede3203027b WatchSource:0}: Error finding container 17057a8fbd98a5eb67754690e2f325233f1ac7112c7b922aa3d62ede3203027b: Status 404 returned error can't find the container with id 17057a8fbd98a5eb67754690e2f325233f1ac7112c7b922aa3d62ede3203027b Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.168308 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-r94c9" event={"ID":"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7","Type":"ContainerStarted","Data":"8592b8270a4eb6a1c1069df7b2c7eb90f0e82168b4716ed826d9c87968b8d2e3"} Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.168370 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-r94c9" event={"ID":"89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7","Type":"ContainerStarted","Data":"75d91f591cc2f470d55a4dd9573d03d687275889eb25b5cdb685d149ccaa390d"} Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.168431 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.174901 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlblh" event={"ID":"40eb46a9-72ab-4076-a44a-e76267ebb92c","Type":"ContainerStarted","Data":"70ee2fc53d3511feb1ab6194c195dd03fe691f5d470201f0e498ca5c25cdb57c"} Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.174962 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlblh" event={"ID":"40eb46a9-72ab-4076-a44a-e76267ebb92c","Type":"ContainerStarted","Data":"481daf8bf5ed1e27eb64459b8e0c8b13f31075b1bc40ef488d39d3ccd929b633"} Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.174976 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-tlblh" event={"ID":"40eb46a9-72ab-4076-a44a-e76267ebb92c","Type":"ContainerStarted","Data":"17057a8fbd98a5eb67754690e2f325233f1ac7112c7b922aa3d62ede3203027b"} Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.175128 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-tlblh" Oct 11 07:09:53 crc kubenswrapper[5055]: I1011 07:09:53.194267 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-68d546b9d8-r94c9" podStartSLOduration=2.194250142 podStartE2EDuration="2.194250142s" podCreationTimestamp="2025-10-11 07:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:09:53.185139461 +0000 UTC m=+976.959413258" watchObservedRunningTime="2025-10-11 07:09:53.194250142 +0000 UTC m=+976.968523949" Oct 11 07:09:57 crc kubenswrapper[5055]: I1011 07:09:57.021893 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-tlblh" podStartSLOduration=6.021872759 podStartE2EDuration="6.021872759s" podCreationTimestamp="2025-10-11 07:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:09:53.203465686 +0000 UTC m=+976.977739493" 
watchObservedRunningTime="2025-10-11 07:09:57.021872759 +0000 UTC m=+980.796146566" Oct 11 07:09:59 crc kubenswrapper[5055]: I1011 07:09:59.249332 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" event={"ID":"10b6afa5-8fa5-4580-8da4-1ba5ad7adca1","Type":"ContainerStarted","Data":"940ecb1d94b2ecc4b27a3112d1c711c6b7d74c3b0bcdb5c2dc0eca500f7f03f1"} Oct 11 07:09:59 crc kubenswrapper[5055]: I1011 07:09:59.249692 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:09:59 crc kubenswrapper[5055]: I1011 07:09:59.251126 5055 generic.go:334] "Generic (PLEG): container finished" podID="4d7343fd-2840-432d-aa04-4a8330fa4b09" containerID="0777cffdde09f7389207391e2102fa48eb3d4db07acbc6fe794c5f31a4afdf1f" exitCode=0 Oct 11 07:09:59 crc kubenswrapper[5055]: I1011 07:09:59.251162 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerDied","Data":"0777cffdde09f7389207391e2102fa48eb3d4db07acbc6fe794c5f31a4afdf1f"} Oct 11 07:09:59 crc kubenswrapper[5055]: I1011 07:09:59.294547 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" podStartSLOduration=1.871835351 podStartE2EDuration="8.29452635s" podCreationTimestamp="2025-10-11 07:09:51 +0000 UTC" firstStartedPulling="2025-10-11 07:09:52.055168999 +0000 UTC m=+975.829442806" lastFinishedPulling="2025-10-11 07:09:58.477859998 +0000 UTC m=+982.252133805" observedRunningTime="2025-10-11 07:09:59.290725151 +0000 UTC m=+983.064998958" watchObservedRunningTime="2025-10-11 07:09:59.29452635 +0000 UTC m=+983.068800167" Oct 11 07:10:00 crc kubenswrapper[5055]: I1011 07:10:00.261161 5055 generic.go:334] "Generic (PLEG): container finished" podID="4d7343fd-2840-432d-aa04-4a8330fa4b09" containerID="d3b48cb065cd269de0af1d4c2cc675f2778ceb5cae353e3a547c48a7abef7fe8" exitCode=0 Oct 11 07:10:00 crc kubenswrapper[5055]: I1011 07:10:00.261250 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerDied","Data":"d3b48cb065cd269de0af1d4c2cc675f2778ceb5cae353e3a547c48a7abef7fe8"} Oct 11 07:10:01 crc kubenswrapper[5055]: I1011 07:10:01.269644 5055 generic.go:334] "Generic (PLEG): container finished" podID="4d7343fd-2840-432d-aa04-4a8330fa4b09" containerID="0a82cc2628d5482cd21bf452c593532ef4f7c4b568f4de36d3752aff519d82e0" exitCode=0 Oct 11 07:10:01 crc kubenswrapper[5055]: I1011 07:10:01.269720 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerDied","Data":"0a82cc2628d5482cd21bf452c593532ef4f7c4b568f4de36d3752aff519d82e0"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.279702 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"4464e37081fb6915889885b256c21d80ac5b19a5c6bc64eb4daa14d622b2bd2f"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.280048 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"950befaae95a99e046aca04825a98b31f7ae537eb7164ad0e2a84b8aa7ce6c83"} Oct 11 07:10:02 crc 
kubenswrapper[5055]: I1011 07:10:02.280064 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"8a5c6706a19d248d70f9f79c9321f002acca9178bc3fa73652da142bb607fd46"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.280074 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"9a1d07ccb53fede18df0538d12e3887efbfb849f7225b85fce1fc3ffca2dd92e"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.280084 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"094805ea341bab85d4ec19b011d10f7f0b2e266daaa05b92646383aa7de973cb"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.280093 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5fz6w" event={"ID":"4d7343fd-2840-432d-aa04-4a8330fa4b09","Type":"ContainerStarted","Data":"5617542a39388b608d94c420843c50a4bf18045c1fbeaa1f58f50fda79db8356"} Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.297897 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-5fz6w" podStartSLOduration=4.85783644 podStartE2EDuration="11.297879117s" podCreationTimestamp="2025-10-11 07:09:51 +0000 UTC" firstStartedPulling="2025-10-11 07:09:52.024652754 +0000 UTC m=+975.798926561" lastFinishedPulling="2025-10-11 07:09:58.464695431 +0000 UTC m=+982.238969238" observedRunningTime="2025-10-11 07:10:02.297360052 +0000 UTC m=+986.071633879" watchObservedRunningTime="2025-10-11 07:10:02.297879117 +0000 UTC m=+986.072152924" Oct 11 07:10:02 crc kubenswrapper[5055]: I1011 07:10:02.522580 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-tlblh" Oct 11 07:10:03 crc kubenswrapper[5055]: I1011 07:10:03.285064 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.186968 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws"] Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.188298 5055 util.go:30] "No sandbox for pod can be found. 
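The "Observed pod startup duration" records above can be cross-checked by hand: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling); for pods whose pull timestamps are the zero value, the two durations coincide. A minimal Go sketch of that arithmetic, using the timestamps logged for frr-k8s-5fz6w (variable names here are ours, not the kubelet's):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the frr-k8s-5fz6w record above.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, _ := time.Parse(layout, "2025-10-11 07:09:51 +0000 UTC")
        firstPull, _ := time.Parse(layout, "2025-10-11 07:09:52.024652754 +0000 UTC")
        lastPull, _ := time.Parse(layout, "2025-10-11 07:09:58.464695431 +0000 UTC")
        observed, _ := time.Parse(layout, "2025-10-11 07:10:02.297879117 +0000 UTC")

        e2e := observed.Sub(created)    // podStartE2EDuration: 11.297879117s
        pull := lastPull.Sub(firstPull) // image-pull window: 6.440042677s
        slo := e2e - pull               // podStartSLOduration: 4.85783644s
        fmt.Println(e2e, pull, slo)
    }

The same relation holds for the frr-k8s-webhook-server record (8.29452635s minus a 6.422690999s pull window gives the logged 1.871835351).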
Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.196125 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.201595 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws"] Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.340281 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.340366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpfcv\" (UniqueName: \"kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.340408 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.442177 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.442236 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpfcv\" (UniqueName: \"kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.442266 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.442637 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.442855 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.460562 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpfcv\" (UniqueName: \"kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv\") pod \"695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.506390 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:04 crc kubenswrapper[5055]: I1011 07:10:04.719305 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws"] Oct 11 07:10:05 crc kubenswrapper[5055]: I1011 07:10:05.296897 5055 generic.go:334] "Generic (PLEG): container finished" podID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerID="c3284670be79762b40cb8ae74c70862d245444a38021f1c4de9fae3ebfce726d" exitCode=0 Oct 11 07:10:05 crc kubenswrapper[5055]: I1011 07:10:05.296994 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" event={"ID":"a970f2fd-e0e0-4837-88e5-09a8b071309b","Type":"ContainerDied","Data":"c3284670be79762b40cb8ae74c70862d245444a38021f1c4de9fae3ebfce726d"} Oct 11 07:10:05 crc kubenswrapper[5055]: I1011 07:10:05.297252 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" event={"ID":"a970f2fd-e0e0-4837-88e5-09a8b071309b","Type":"ContainerStarted","Data":"63858639d53583c8de6760b7ab26eda0aaa6d72349e12ed5c7a9e28e061184cb"} Oct 11 07:10:06 crc kubenswrapper[5055]: I1011 07:10:06.853168 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:10:06 crc kubenswrapper[5055]: I1011 07:10:06.905299 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:10:08 crc kubenswrapper[5055]: I1011 07:10:08.312672 5055 generic.go:334] "Generic (PLEG): container finished" podID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerID="6c34f97b1759be541f176666d14b10cecaf47b481c8857adbd147f2ba03c0bc4" exitCode=0 Oct 11 07:10:08 crc kubenswrapper[5055]: I1011 07:10:08.312908 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" 
event={"ID":"a970f2fd-e0e0-4837-88e5-09a8b071309b","Type":"ContainerDied","Data":"6c34f97b1759be541f176666d14b10cecaf47b481c8857adbd147f2ba03c0bc4"} Oct 11 07:10:09 crc kubenswrapper[5055]: I1011 07:10:09.320684 5055 generic.go:334] "Generic (PLEG): container finished" podID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerID="24a0593525a599e2d356fd1a9ca7646c569531b906ad77a75206574e2ee3d655" exitCode=0 Oct 11 07:10:09 crc kubenswrapper[5055]: I1011 07:10:09.320790 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" event={"ID":"a970f2fd-e0e0-4837-88e5-09a8b071309b","Type":"ContainerDied","Data":"24a0593525a599e2d356fd1a9ca7646c569531b906ad77a75206574e2ee3d655"} Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.603124 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.725132 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpfcv\" (UniqueName: \"kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv\") pod \"a970f2fd-e0e0-4837-88e5-09a8b071309b\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.725200 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle\") pod \"a970f2fd-e0e0-4837-88e5-09a8b071309b\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.726232 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util\") pod \"a970f2fd-e0e0-4837-88e5-09a8b071309b\" (UID: \"a970f2fd-e0e0-4837-88e5-09a8b071309b\") " Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.728534 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle" (OuterVolumeSpecName: "bundle") pod "a970f2fd-e0e0-4837-88e5-09a8b071309b" (UID: "a970f2fd-e0e0-4837-88e5-09a8b071309b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.730693 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv" (OuterVolumeSpecName: "kube-api-access-hpfcv") pod "a970f2fd-e0e0-4837-88e5-09a8b071309b" (UID: "a970f2fd-e0e0-4837-88e5-09a8b071309b"). InnerVolumeSpecName "kube-api-access-hpfcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.735815 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util" (OuterVolumeSpecName: "util") pod "a970f2fd-e0e0-4837-88e5-09a8b071309b" (UID: "a970f2fd-e0e0-4837-88e5-09a8b071309b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.827713 5055 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-util\") on node \"crc\" DevicePath \"\"" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.827757 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpfcv\" (UniqueName: \"kubernetes.io/projected/a970f2fd-e0e0-4837-88e5-09a8b071309b-kube-api-access-hpfcv\") on node \"crc\" DevicePath \"\"" Oct 11 07:10:10 crc kubenswrapper[5055]: I1011 07:10:10.827833 5055 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a970f2fd-e0e0-4837-88e5-09a8b071309b-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.335732 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" event={"ID":"a970f2fd-e0e0-4837-88e5-09a8b071309b","Type":"ContainerDied","Data":"63858639d53583c8de6760b7ab26eda0aaa6d72349e12ed5c7a9e28e061184cb"} Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.335787 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63858639d53583c8de6760b7ab26eda0aaa6d72349e12ed5c7a9e28e061184cb" Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.336332 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws" Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.841172 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-p2ns9" Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.855422 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-5fz6w" Oct 11 07:10:11 crc kubenswrapper[5055]: I1011 07:10:11.932320 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-68d546b9d8-r94c9" Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.635970 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"] Oct 11 07:10:16 crc kubenswrapper[5055]: E1011 07:10:16.637388 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="extract" Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.637499 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="extract" Oct 11 07:10:16 crc kubenswrapper[5055]: E1011 07:10:16.637608 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="pull" Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.637674 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="pull" Oct 11 07:10:16 crc kubenswrapper[5055]: E1011 07:10:16.637749 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="util" Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.637829 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a970f2fd-e0e0-4837-88e5-09a8b071309b" containerName="util" Oct 11 07:10:16 
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.638610 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.640911 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.642938 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.645642 5055 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-mb9wl"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.651931 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"]
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.715243 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qptxx\" (UniqueName: \"kubernetes.io/projected/24f02977-8f26-4c52-9fd9-61dc01e27795-kube-api-access-qptxx\") pod \"cert-manager-operator-controller-manager-57cd46d6d-5xhqq\" (UID: \"24f02977-8f26-4c52-9fd9-61dc01e27795\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.815648 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qptxx\" (UniqueName: \"kubernetes.io/projected/24f02977-8f26-4c52-9fd9-61dc01e27795-kube-api-access-qptxx\") pod \"cert-manager-operator-controller-manager-57cd46d6d-5xhqq\" (UID: \"24f02977-8f26-4c52-9fd9-61dc01e27795\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.844943 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qptxx\" (UniqueName: \"kubernetes.io/projected/24f02977-8f26-4c52-9fd9-61dc01e27795-kube-api-access-qptxx\") pod \"cert-manager-operator-controller-manager-57cd46d6d-5xhqq\" (UID: \"24f02977-8f26-4c52-9fd9-61dc01e27795\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"
Oct 11 07:10:16 crc kubenswrapper[5055]: I1011 07:10:16.955649 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"
Oct 11 07:10:17 crc kubenswrapper[5055]: W1011 07:10:17.356041 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24f02977_8f26_4c52_9fd9_61dc01e27795.slice/crio-1be5b1298a98a4f2f28d8634dc81eff08e3a83b0f054bde9801fb78c04a3cbe5 WatchSource:0}: Error finding container 1be5b1298a98a4f2f28d8634dc81eff08e3a83b0f054bde9801fb78c04a3cbe5: Status 404 returned error can't find the container with id 1be5b1298a98a4f2f28d8634dc81eff08e3a83b0f054bde9801fb78c04a3cbe5
Oct 11 07:10:17 crc kubenswrapper[5055]: I1011 07:10:17.360752 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq"]
Oct 11 07:10:17 crc kubenswrapper[5055]: I1011 07:10:17.372157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq" event={"ID":"24f02977-8f26-4c52-9fd9-61dc01e27795","Type":"ContainerStarted","Data":"1be5b1298a98a4f2f28d8634dc81eff08e3a83b0f054bde9801fb78c04a3cbe5"}
Oct 11 07:10:24 crc kubenswrapper[5055]: I1011 07:10:24.434472 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq" event={"ID":"24f02977-8f26-4c52-9fd9-61dc01e27795","Type":"ContainerStarted","Data":"d775852b438766d567fabf4a6913dc1b6f133b71b34094073d12a7fad1b5c8a8"}
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.119681 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-57cd46d6d-5xhqq" podStartSLOduration=3.163754103 podStartE2EDuration="9.119660041s" podCreationTimestamp="2025-10-11 07:10:16 +0000 UTC" firstStartedPulling="2025-10-11 07:10:17.365968616 +0000 UTC m=+1001.140242423" lastFinishedPulling="2025-10-11 07:10:23.321874554 +0000 UTC m=+1007.096148361" observedRunningTime="2025-10-11 07:10:24.470645446 +0000 UTC m=+1008.244919293" watchObservedRunningTime="2025-10-11 07:10:25.119660041 +0000 UTC m=+1008.893933858"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.120339 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-lqblm"]
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.121114 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.122719 5055 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-wrccw"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.122732 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.123022 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.128580 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-lqblm"]
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.231011 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crlcj\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-kube-api-access-crlcj\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.231181 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-bound-sa-token\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.332933 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crlcj\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-kube-api-access-crlcj\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.333030 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-bound-sa-token\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.355073 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crlcj\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-kube-api-access-crlcj\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.355851 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f879cfe7-12ac-431e-a7d3-a3c3bff29401-bound-sa-token\") pod \"cert-manager-webhook-d969966f-lqblm\" (UID: \"f879cfe7-12ac-431e-a7d3-a3c3bff29401\") " pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.436004 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-d969966f-lqblm"
Oct 11 07:10:25 crc kubenswrapper[5055]: I1011 07:10:25.889984 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-d969966f-lqblm"]
Oct 11 07:10:26 crc kubenswrapper[5055]: I1011 07:10:26.446247 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-lqblm" event={"ID":"f879cfe7-12ac-431e-a7d3-a3c3bff29401","Type":"ContainerStarted","Data":"fce1c79746794bbf745b10426cd9cfe65c885c6b0ea623251d0261687a8a8cac"}
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.070056 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"]
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.071285 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.073671 5055 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cb9bw"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.076194 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"]
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.172241 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.172291 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp885\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-kube-api-access-mp885\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.274289 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp885\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-kube-api-access-mp885\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.274430 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.292401 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-bound-sa-token\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.299863 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp885\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-kube-api-access-mp885\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"
"MountVolume.SetUp succeeded for volume \"kube-api-access-mp885\" (UniqueName: \"kubernetes.io/projected/bc9180a6-db2c-45cc-aafa-285a56562abc-kube-api-access-mp885\") pod \"cert-manager-cainjector-7d9f95dbf-zvrrq\" (UID: \"bc9180a6-db2c-45cc-aafa-285a56562abc\") " pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq" Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.395408 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq" Oct 11 07:10:28 crc kubenswrapper[5055]: I1011 07:10:28.592172 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq"] Oct 11 07:10:30 crc kubenswrapper[5055]: W1011 07:10:30.125721 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc9180a6_db2c_45cc_aafa_285a56562abc.slice/crio-baef9ce30efd254f92457550cf294374e654e11bdc43b5cc58159f214cf14b44 WatchSource:0}: Error finding container baef9ce30efd254f92457550cf294374e654e11bdc43b5cc58159f214cf14b44: Status 404 returned error can't find the container with id baef9ce30efd254f92457550cf294374e654e11bdc43b5cc58159f214cf14b44 Oct 11 07:10:30 crc kubenswrapper[5055]: I1011 07:10:30.469076 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq" event={"ID":"bc9180a6-db2c-45cc-aafa-285a56562abc","Type":"ContainerStarted","Data":"baef9ce30efd254f92457550cf294374e654e11bdc43b5cc58159f214cf14b44"} Oct 11 07:10:30 crc kubenswrapper[5055]: I1011 07:10:30.470456 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-d969966f-lqblm" event={"ID":"f879cfe7-12ac-431e-a7d3-a3c3bff29401","Type":"ContainerStarted","Data":"99f981aa5b037f9a77babd27093de1566c9e94662681d7a9759d54e4be26450d"} Oct 11 07:10:30 crc kubenswrapper[5055]: I1011 07:10:30.470626 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-d969966f-lqblm" Oct 11 07:10:31 crc kubenswrapper[5055]: I1011 07:10:31.478991 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq" event={"ID":"bc9180a6-db2c-45cc-aafa-285a56562abc","Type":"ContainerStarted","Data":"9ade56f463d9d4cfc8bb94dd01ed32b2fc3cf3e6ecc0581ffb98fd230a9562ce"} Oct 11 07:10:31 crc kubenswrapper[5055]: I1011 07:10:31.507813 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-d969966f-lqblm" podStartSLOduration=2.222690119 podStartE2EDuration="6.507796681s" podCreationTimestamp="2025-10-11 07:10:25 +0000 UTC" firstStartedPulling="2025-10-11 07:10:25.901150804 +0000 UTC m=+1009.675424611" lastFinishedPulling="2025-10-11 07:10:30.186257366 +0000 UTC m=+1013.960531173" observedRunningTime="2025-10-11 07:10:30.486266296 +0000 UTC m=+1014.260540103" watchObservedRunningTime="2025-10-11 07:10:31.507796681 +0000 UTC m=+1015.282070488" Oct 11 07:10:31 crc kubenswrapper[5055]: I1011 07:10:31.509164 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7d9f95dbf-zvrrq" podStartSLOduration=2.32624917 podStartE2EDuration="3.50915841s" podCreationTimestamp="2025-10-11 07:10:28 +0000 UTC" firstStartedPulling="2025-10-11 07:10:30.128510621 +0000 UTC m=+1013.902784438" lastFinishedPulling="2025-10-11 07:10:31.311419871 +0000 UTC m=+1015.085693678" observedRunningTime="2025-10-11 
07:10:31.505581458 +0000 UTC m=+1015.279855265" watchObservedRunningTime="2025-10-11 07:10:31.50915841 +0000 UTC m=+1015.283432217" Oct 11 07:10:35 crc kubenswrapper[5055]: I1011 07:10:35.440427 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-d969966f-lqblm" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.091609 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-5gv8v"] Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.093019 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.096631 5055 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-h4ggt" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.102102 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-5gv8v"] Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.120139 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcdpf\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-kube-api-access-fcdpf\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.120196 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.221068 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcdpf\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-kube-api-access-fcdpf\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.221118 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.238720 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-bound-sa-token\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.239018 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcdpf\" (UniqueName: \"kubernetes.io/projected/733b591a-c173-4ea1-8f0a-4b48ec3f3a49-kube-api-access-fcdpf\") pod \"cert-manager-7d4cc89fcb-5gv8v\" (UID: \"733b591a-c173-4ea1-8f0a-4b48ec3f3a49\") " pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.411354 5055 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" Oct 11 07:10:45 crc kubenswrapper[5055]: I1011 07:10:45.623664 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-7d4cc89fcb-5gv8v"] Oct 11 07:10:45 crc kubenswrapper[5055]: W1011 07:10:45.629094 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod733b591a_c173_4ea1_8f0a_4b48ec3f3a49.slice/crio-a0e0d3b85a9f974e85b72e704e82cf2f45b7b0c60fbb00835a56d409a895d45e WatchSource:0}: Error finding container a0e0d3b85a9f974e85b72e704e82cf2f45b7b0c60fbb00835a56d409a895d45e: Status 404 returned error can't find the container with id a0e0d3b85a9f974e85b72e704e82cf2f45b7b0c60fbb00835a56d409a895d45e Oct 11 07:10:46 crc kubenswrapper[5055]: I1011 07:10:46.572665 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" event={"ID":"733b591a-c173-4ea1-8f0a-4b48ec3f3a49","Type":"ContainerStarted","Data":"6c2f159b3ec4df9e74e99f66b5ddfa2c4604525b7056de9b22eef27a57b36525"} Oct 11 07:10:46 crc kubenswrapper[5055]: I1011 07:10:46.572953 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" event={"ID":"733b591a-c173-4ea1-8f0a-4b48ec3f3a49","Type":"ContainerStarted","Data":"a0e0d3b85a9f974e85b72e704e82cf2f45b7b0c60fbb00835a56d409a895d45e"} Oct 11 07:10:46 crc kubenswrapper[5055]: I1011 07:10:46.606692 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-7d4cc89fcb-5gv8v" podStartSLOduration=1.606618809 podStartE2EDuration="1.606618809s" podCreationTimestamp="2025-10-11 07:10:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:10:46.593988797 +0000 UTC m=+1030.368262654" watchObservedRunningTime="2025-10-11 07:10:46.606618809 +0000 UTC m=+1030.380892656" Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.889175 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.890584 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.893178 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-9cwcz" Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.897340 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.897502 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.903838 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:49 crc kubenswrapper[5055]: I1011 07:10:49.987344 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg647\" (UniqueName: \"kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647\") pod \"openstack-operator-index-fkk56\" (UID: \"c6220229-d2e1-4b30-8a71-0515d78c2f3d\") " pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:50 crc kubenswrapper[5055]: I1011 07:10:50.088505 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg647\" (UniqueName: \"kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647\") pod \"openstack-operator-index-fkk56\" (UID: \"c6220229-d2e1-4b30-8a71-0515d78c2f3d\") " pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:50 crc kubenswrapper[5055]: I1011 07:10:50.106122 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg647\" (UniqueName: \"kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647\") pod \"openstack-operator-index-fkk56\" (UID: \"c6220229-d2e1-4b30-8a71-0515d78c2f3d\") " pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:50 crc kubenswrapper[5055]: I1011 07:10:50.271949 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:50 crc kubenswrapper[5055]: I1011 07:10:50.456512 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:50 crc kubenswrapper[5055]: I1011 07:10:50.596045 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fkk56" event={"ID":"c6220229-d2e1-4b30-8a71-0515d78c2f3d","Type":"ContainerStarted","Data":"85be1a19d8d7819abe22cf6ef983b83b0917c7142952c4adf6ac0e5d6be2a56e"} Oct 11 07:10:51 crc kubenswrapper[5055]: I1011 07:10:51.604338 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fkk56" event={"ID":"c6220229-d2e1-4b30-8a71-0515d78c2f3d","Type":"ContainerStarted","Data":"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242"} Oct 11 07:10:51 crc kubenswrapper[5055]: I1011 07:10:51.628226 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-fkk56" podStartSLOduration=1.8217282529999999 podStartE2EDuration="2.628197462s" podCreationTimestamp="2025-10-11 07:10:49 +0000 UTC" firstStartedPulling="2025-10-11 07:10:50.470647579 +0000 UTC m=+1034.244921386" lastFinishedPulling="2025-10-11 07:10:51.277116768 +0000 UTC m=+1035.051390595" observedRunningTime="2025-10-11 07:10:51.623486497 +0000 UTC m=+1035.397760344" watchObservedRunningTime="2025-10-11 07:10:51.628197462 +0000 UTC m=+1035.402471309" Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.067122 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.615152 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-fkk56" podUID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" containerName="registry-server" containerID="cri-o://74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242" gracePeriod=2 Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.673694 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-t6qk8"] Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.674561 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.683282 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-t6qk8"] Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.739394 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p95m2\" (UniqueName: \"kubernetes.io/projected/716f5b93-0e21-4e3a-a15f-01f2a6ece1fa-kube-api-access-p95m2\") pod \"openstack-operator-index-t6qk8\" (UID: \"716f5b93-0e21-4e3a-a15f-01f2a6ece1fa\") " pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.840885 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p95m2\" (UniqueName: \"kubernetes.io/projected/716f5b93-0e21-4e3a-a15f-01f2a6ece1fa-kube-api-access-p95m2\") pod \"openstack-operator-index-t6qk8\" (UID: \"716f5b93-0e21-4e3a-a15f-01f2a6ece1fa\") " pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.859360 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p95m2\" (UniqueName: \"kubernetes.io/projected/716f5b93-0e21-4e3a-a15f-01f2a6ece1fa-kube-api-access-p95m2\") pod \"openstack-operator-index-t6qk8\" (UID: \"716f5b93-0e21-4e3a-a15f-01f2a6ece1fa\") " pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:10:53 crc kubenswrapper[5055]: I1011 07:10:53.991409 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.043442 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg647\" (UniqueName: \"kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647\") pod \"c6220229-d2e1-4b30-8a71-0515d78c2f3d\" (UID: \"c6220229-d2e1-4b30-8a71-0515d78c2f3d\") " Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.046810 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647" (OuterVolumeSpecName: "kube-api-access-fg647") pod "c6220229-d2e1-4b30-8a71-0515d78c2f3d" (UID: "c6220229-d2e1-4b30-8a71-0515d78c2f3d"). InnerVolumeSpecName "kube-api-access-fg647". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.072130 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.144960 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg647\" (UniqueName: \"kubernetes.io/projected/c6220229-d2e1-4b30-8a71-0515d78c2f3d-kube-api-access-fg647\") on node \"crc\" DevicePath \"\"" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.442024 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-t6qk8"] Oct 11 07:10:54 crc kubenswrapper[5055]: W1011 07:10:54.453867 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod716f5b93_0e21_4e3a_a15f_01f2a6ece1fa.slice/crio-ab048a3aa63b86c1a9c3a5f958a5fe8c4b0980b030202e111a2b857acff1f5b2 WatchSource:0}: Error finding container ab048a3aa63b86c1a9c3a5f958a5fe8c4b0980b030202e111a2b857acff1f5b2: Status 404 returned error can't find the container with id ab048a3aa63b86c1a9c3a5f958a5fe8c4b0980b030202e111a2b857acff1f5b2 Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.624098 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-t6qk8" event={"ID":"716f5b93-0e21-4e3a-a15f-01f2a6ece1fa","Type":"ContainerStarted","Data":"ab048a3aa63b86c1a9c3a5f958a5fe8c4b0980b030202e111a2b857acff1f5b2"} Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.626414 5055 generic.go:334] "Generic (PLEG): container finished" podID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" containerID="74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242" exitCode=0 Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.626492 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fkk56" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.626482 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fkk56" event={"ID":"c6220229-d2e1-4b30-8a71-0515d78c2f3d","Type":"ContainerDied","Data":"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242"} Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.626974 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fkk56" event={"ID":"c6220229-d2e1-4b30-8a71-0515d78c2f3d","Type":"ContainerDied","Data":"85be1a19d8d7819abe22cf6ef983b83b0917c7142952c4adf6ac0e5d6be2a56e"} Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.627010 5055 scope.go:117] "RemoveContainer" containerID="74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.648317 5055 scope.go:117] "RemoveContainer" containerID="74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242" Oct 11 07:10:54 crc kubenswrapper[5055]: E1011 07:10:54.649267 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242\": container with ID starting with 74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242 not found: ID does not exist" containerID="74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.649341 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242"} err="failed to get container status \"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242\": rpc error: code = NotFound desc = could not find container \"74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242\": container with ID starting with 74c5b8f3b9d72a6904c0bd06b58e5bf9408b4bcd69003397350bb06e78e63242 not found: ID does not exist" Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.675321 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:54 crc kubenswrapper[5055]: I1011 07:10:54.683585 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-fkk56"] Oct 11 07:10:55 crc kubenswrapper[5055]: I1011 07:10:55.004289 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" path="/var/lib/kubelet/pods/c6220229-d2e1-4b30-8a71-0515d78c2f3d/volumes" Oct 11 07:10:55 crc kubenswrapper[5055]: I1011 07:10:55.635732 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-t6qk8" event={"ID":"716f5b93-0e21-4e3a-a15f-01f2a6ece1fa","Type":"ContainerStarted","Data":"c444366cff7736f2f978f4ba3363c09b64e8e8f268af684c94ac48aec8ed6b2c"} Oct 11 07:10:55 crc kubenswrapper[5055]: I1011 07:10:55.651463 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-t6qk8" podStartSLOduration=2.232418595 podStartE2EDuration="2.651444557s" podCreationTimestamp="2025-10-11 07:10:53 +0000 UTC" firstStartedPulling="2025-10-11 07:10:54.457121139 +0000 UTC m=+1038.231394946" lastFinishedPulling="2025-10-11 07:10:54.876147091 +0000 UTC m=+1038.650420908" 
observedRunningTime="2025-10-11 07:10:55.65012382 +0000 UTC m=+1039.424397657" watchObservedRunningTime="2025-10-11 07:10:55.651444557 +0000 UTC m=+1039.425718364" Oct 11 07:11:04 crc kubenswrapper[5055]: I1011 07:11:04.073368 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:11:04 crc kubenswrapper[5055]: I1011 07:11:04.073747 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:11:04 crc kubenswrapper[5055]: I1011 07:11:04.100170 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:11:04 crc kubenswrapper[5055]: I1011 07:11:04.731274 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-t6qk8" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.718718 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl"] Oct 11 07:11:11 crc kubenswrapper[5055]: E1011 07:11:11.719453 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" containerName="registry-server" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.719464 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" containerName="registry-server" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.719576 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6220229-d2e1-4b30-8a71-0515d78c2f3d" containerName="registry-server" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.720353 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.723119 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-kntfw" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.731866 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl"] Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.803316 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.803392 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.803442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmh9z\" (UniqueName: \"kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.905040 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmh9z\" (UniqueName: \"kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.905148 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.905215 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.905632 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.905699 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:11 crc kubenswrapper[5055]: I1011 07:11:11.922101 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmh9z\" (UniqueName: \"kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z\") pod \"bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:12 crc kubenswrapper[5055]: I1011 07:11:12.048891 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:12 crc kubenswrapper[5055]: I1011 07:11:12.439082 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl"] Oct 11 07:11:12 crc kubenswrapper[5055]: W1011 07:11:12.448804 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca031800_cd06_4d1b_bea6_cbd26e97b325.slice/crio-eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e WatchSource:0}: Error finding container eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e: Status 404 returned error can't find the container with id eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e Oct 11 07:11:12 crc kubenswrapper[5055]: I1011 07:11:12.750095 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerID="f9962f042ee8e6fa3e213e18d5dea6368e319cf0eb5aa5327fd5b4116cd4f7ca" exitCode=0 Oct 11 07:11:12 crc kubenswrapper[5055]: I1011 07:11:12.750143 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" event={"ID":"ca031800-cd06-4d1b-bea6-cbd26e97b325","Type":"ContainerDied","Data":"f9962f042ee8e6fa3e213e18d5dea6368e319cf0eb5aa5327fd5b4116cd4f7ca"} Oct 11 07:11:12 crc kubenswrapper[5055]: I1011 07:11:12.750466 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" event={"ID":"ca031800-cd06-4d1b-bea6-cbd26e97b325","Type":"ContainerStarted","Data":"eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e"} Oct 11 07:11:13 crc kubenswrapper[5055]: I1011 07:11:13.757147 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerID="bcf0c7112bbc823eebe03a2091dfa6f04e67695293232aeaf30aae842cc1cad7" exitCode=0 Oct 11 07:11:13 crc kubenswrapper[5055]: I1011 07:11:13.757230 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" event={"ID":"ca031800-cd06-4d1b-bea6-cbd26e97b325","Type":"ContainerDied","Data":"bcf0c7112bbc823eebe03a2091dfa6f04e67695293232aeaf30aae842cc1cad7"} Oct 11 07:11:14 crc kubenswrapper[5055]: I1011 07:11:14.767577 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerID="21e2e0ae7751675e2f875ca2b3f8f6795289797a401fef60f386d2aa20d84f76" exitCode=0 Oct 11 07:11:14 crc kubenswrapper[5055]: I1011 07:11:14.768285 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" event={"ID":"ca031800-cd06-4d1b-bea6-cbd26e97b325","Type":"ContainerDied","Data":"21e2e0ae7751675e2f875ca2b3f8f6795289797a401fef60f386d2aa20d84f76"} Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.051788 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.174887 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle\") pod \"ca031800-cd06-4d1b-bea6-cbd26e97b325\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.175036 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmh9z\" (UniqueName: \"kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z\") pod \"ca031800-cd06-4d1b-bea6-cbd26e97b325\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.175074 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util\") pod \"ca031800-cd06-4d1b-bea6-cbd26e97b325\" (UID: \"ca031800-cd06-4d1b-bea6-cbd26e97b325\") " Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.176920 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle" (OuterVolumeSpecName: "bundle") pod "ca031800-cd06-4d1b-bea6-cbd26e97b325" (UID: "ca031800-cd06-4d1b-bea6-cbd26e97b325"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.181099 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z" (OuterVolumeSpecName: "kube-api-access-lmh9z") pod "ca031800-cd06-4d1b-bea6-cbd26e97b325" (UID: "ca031800-cd06-4d1b-bea6-cbd26e97b325"). InnerVolumeSpecName "kube-api-access-lmh9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.194458 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util" (OuterVolumeSpecName: "util") pod "ca031800-cd06-4d1b-bea6-cbd26e97b325" (UID: "ca031800-cd06-4d1b-bea6-cbd26e97b325"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.277129 5055 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.277164 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmh9z\" (UniqueName: \"kubernetes.io/projected/ca031800-cd06-4d1b-bea6-cbd26e97b325-kube-api-access-lmh9z\") on node \"crc\" DevicePath \"\"" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.277176 5055 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ca031800-cd06-4d1b-bea6-cbd26e97b325-util\") on node \"crc\" DevicePath \"\"" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.785948 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" event={"ID":"ca031800-cd06-4d1b-bea6-cbd26e97b325","Type":"ContainerDied","Data":"eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e"} Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.786009 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb266275d9567fe5fa7756809910930884977b16238e9d2492cb91704526fd0e" Oct 11 07:11:16 crc kubenswrapper[5055]: I1011 07:11:16.786016 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.578655 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-f26lm"] Oct 11 07:11:19 crc kubenswrapper[5055]: E1011 07:11:19.579660 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="pull" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.579691 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="pull" Oct 11 07:11:19 crc kubenswrapper[5055]: E1011 07:11:19.579721 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="extract" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.579734 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="extract" Oct 11 07:11:19 crc kubenswrapper[5055]: E1011 07:11:19.579842 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="util" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.579858 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="util" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.580083 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca031800-cd06-4d1b-bea6-cbd26e97b325" containerName="extract" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.581582 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.584554 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-9rg2v" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.610359 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-f26lm"] Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.730513 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxsw8\" (UniqueName: \"kubernetes.io/projected/bf25a169-f625-4758-9a83-4cd9acdcdffe-kube-api-access-zxsw8\") pod \"openstack-operator-controller-operator-688d597459-f26lm\" (UID: \"bf25a169-f625-4758-9a83-4cd9acdcdffe\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.831919 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxsw8\" (UniqueName: \"kubernetes.io/projected/bf25a169-f625-4758-9a83-4cd9acdcdffe-kube-api-access-zxsw8\") pod \"openstack-operator-controller-operator-688d597459-f26lm\" (UID: \"bf25a169-f625-4758-9a83-4cd9acdcdffe\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.852732 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxsw8\" (UniqueName: \"kubernetes.io/projected/bf25a169-f625-4758-9a83-4cd9acdcdffe-kube-api-access-zxsw8\") pod \"openstack-operator-controller-operator-688d597459-f26lm\" (UID: \"bf25a169-f625-4758-9a83-4cd9acdcdffe\") " pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:19 crc kubenswrapper[5055]: I1011 07:11:19.901289 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:20 crc kubenswrapper[5055]: I1011 07:11:20.150297 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-688d597459-f26lm"] Oct 11 07:11:20 crc kubenswrapper[5055]: I1011 07:11:20.808850 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" event={"ID":"bf25a169-f625-4758-9a83-4cd9acdcdffe","Type":"ContainerStarted","Data":"7d607071e8d1d0b2e519bccf67ee146fddc3ece4bf6cd2078a41383020e6b915"} Oct 11 07:11:24 crc kubenswrapper[5055]: I1011 07:11:24.847895 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" event={"ID":"bf25a169-f625-4758-9a83-4cd9acdcdffe","Type":"ContainerStarted","Data":"1148c70a25a2b4eaea0906ab4caf102619415d4b48dddf876921d1a4867247d6"} Oct 11 07:11:26 crc kubenswrapper[5055]: I1011 07:11:26.861402 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" event={"ID":"bf25a169-f625-4758-9a83-4cd9acdcdffe","Type":"ContainerStarted","Data":"1f2be59eca8a87286e5d5d6d39fbea3beae55888b8ef041f029633760e1ff50f"} Oct 11 07:11:26 crc kubenswrapper[5055]: I1011 07:11:26.861715 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:26 crc kubenswrapper[5055]: I1011 07:11:26.890172 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" podStartSLOduration=1.855336489 podStartE2EDuration="7.890153499s" podCreationTimestamp="2025-10-11 07:11:19 +0000 UTC" firstStartedPulling="2025-10-11 07:11:20.160655124 +0000 UTC m=+1063.934928931" lastFinishedPulling="2025-10-11 07:11:26.195472134 +0000 UTC m=+1069.969745941" observedRunningTime="2025-10-11 07:11:26.887696869 +0000 UTC m=+1070.661970676" watchObservedRunningTime="2025-10-11 07:11:26.890153499 +0000 UTC m=+1070.664427306" Oct 11 07:11:29 crc kubenswrapper[5055]: I1011 07:11:29.904146 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-688d597459-f26lm" Oct 11 07:11:32 crc kubenswrapper[5055]: I1011 07:11:32.421853 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:11:32 crc kubenswrapper[5055]: I1011 07:11:32.422147 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.611974 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.613360 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.615370 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-s4jqn" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.623030 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.628055 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.629478 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.631966 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-n7lpd" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.641103 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.642235 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.651164 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-ppvkt" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.658863 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.659805 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.663517 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.670779 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-lkhdq" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.677773 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6ncn\" (UniqueName: \"kubernetes.io/projected/8e18a79a-705e-467e-b7cd-0fc0a90f1a7a-kube-api-access-s6ncn\") pod \"cinder-operator-controller-manager-7b7fb68549-v7jqb\" (UID: \"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.677874 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd9x4\" (UniqueName: \"kubernetes.io/projected/b3d55b74-0882-459a-be9a-2da659337819-kube-api-access-sd9x4\") pod \"barbican-operator-controller-manager-658bdf4b74-5ngjz\" (UID: \"b3d55b74-0882-459a-be9a-2da659337819\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.708224 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.714636 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.719639 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.723253 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-sd9vv" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.732165 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.773357 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.779466 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7msqf\" (UniqueName: \"kubernetes.io/projected/cb2cd76d-3803-48d6-8882-9b895be3c494-kube-api-access-7msqf\") pod \"designate-operator-controller-manager-85d5d9dd78-zmrmw\" (UID: \"cb2cd76d-3803-48d6-8882-9b895be3c494\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.779519 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd9x4\" (UniqueName: \"kubernetes.io/projected/b3d55b74-0882-459a-be9a-2da659337819-kube-api-access-sd9x4\") pod \"barbican-operator-controller-manager-658bdf4b74-5ngjz\" (UID: \"b3d55b74-0882-459a-be9a-2da659337819\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.779580 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dv2vz\" (UniqueName: \"kubernetes.io/projected/d0538072-f53e-4d2a-858e-667657ffd09e-kube-api-access-dv2vz\") pod \"heat-operator-controller-manager-858f76bbdd-gxs22\" (UID: \"d0538072-f53e-4d2a-858e-667657ffd09e\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.779640 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6ncn\" (UniqueName: \"kubernetes.io/projected/8e18a79a-705e-467e-b7cd-0fc0a90f1a7a-kube-api-access-s6ncn\") pod \"cinder-operator-controller-manager-7b7fb68549-v7jqb\" (UID: \"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.779666 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrvj6\" (UniqueName: \"kubernetes.io/projected/ea4aab30-444b-4efe-a76d-bcd6c673fffe-kube-api-access-lrvj6\") pod \"glance-operator-controller-manager-84b9b84486-bdvrf\" (UID: \"ea4aab30-444b-4efe-a76d-bcd6c673fffe\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.788156 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.789753 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.799483 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-b9qr4" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.809596 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.810548 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.814302 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-wjhhr" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.814439 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.814522 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.820635 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.837644 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6ncn\" (UniqueName: \"kubernetes.io/projected/8e18a79a-705e-467e-b7cd-0fc0a90f1a7a-kube-api-access-s6ncn\") pod \"cinder-operator-controller-manager-7b7fb68549-v7jqb\" (UID: \"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a\") " pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.839429 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd9x4\" (UniqueName: \"kubernetes.io/projected/b3d55b74-0882-459a-be9a-2da659337819-kube-api-access-sd9x4\") pod \"barbican-operator-controller-manager-658bdf4b74-5ngjz\" (UID: \"b3d55b74-0882-459a-be9a-2da659337819\") " pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.852029 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.853274 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.862173 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-wx5xx" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.870508 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.871658 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.882812 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887532 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-xrghk" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887605 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbtx4\" (UniqueName: \"kubernetes.io/projected/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-kube-api-access-sbtx4\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887656 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7msqf\" (UniqueName: \"kubernetes.io/projected/cb2cd76d-3803-48d6-8882-9b895be3c494-kube-api-access-7msqf\") pod \"designate-operator-controller-manager-85d5d9dd78-zmrmw\" (UID: \"cb2cd76d-3803-48d6-8882-9b895be3c494\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887715 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv2vz\" (UniqueName: \"kubernetes.io/projected/d0538072-f53e-4d2a-858e-667657ffd09e-kube-api-access-dv2vz\") pod \"heat-operator-controller-manager-858f76bbdd-gxs22\" (UID: \"d0538072-f53e-4d2a-858e-667657ffd09e\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887789 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887825 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrvj6\" (UniqueName: \"kubernetes.io/projected/ea4aab30-444b-4efe-a76d-bcd6c673fffe-kube-api-access-lrvj6\") pod \"glance-operator-controller-manager-84b9b84486-bdvrf\" (UID: \"ea4aab30-444b-4efe-a76d-bcd6c673fffe\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.887853 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b9k8\" (UniqueName: \"kubernetes.io/projected/c6fd3cbf-29c5-4820-8936-2203cfa34345-kube-api-access-5b9k8\") pod \"horizon-operator-controller-manager-7ffbcb7588-5zhf2\" (UID: \"c6fd3cbf-29c5-4820-8936-2203cfa34345\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.942531 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dv2vz\" (UniqueName: \"kubernetes.io/projected/d0538072-f53e-4d2a-858e-667657ffd09e-kube-api-access-dv2vz\") pod 
\"heat-operator-controller-manager-858f76bbdd-gxs22\" (UID: \"d0538072-f53e-4d2a-858e-667657ffd09e\") " pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.944577 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7msqf\" (UniqueName: \"kubernetes.io/projected/cb2cd76d-3803-48d6-8882-9b895be3c494-kube-api-access-7msqf\") pod \"designate-operator-controller-manager-85d5d9dd78-zmrmw\" (UID: \"cb2cd76d-3803-48d6-8882-9b895be3c494\") " pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.961668 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrvj6\" (UniqueName: \"kubernetes.io/projected/ea4aab30-444b-4efe-a76d-bcd6c673fffe-kube-api-access-lrvj6\") pod \"glance-operator-controller-manager-84b9b84486-bdvrf\" (UID: \"ea4aab30-444b-4efe-a76d-bcd6c673fffe\") " pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.963008 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v"] Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.964990 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.985794 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:11:45 crc kubenswrapper[5055]: I1011 07:11:45.987095 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.001679 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.031281 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.031368 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b9k8\" (UniqueName: \"kubernetes.io/projected/c6fd3cbf-29c5-4820-8936-2203cfa34345-kube-api-access-5b9k8\") pod \"horizon-operator-controller-manager-7ffbcb7588-5zhf2\" (UID: \"c6fd3cbf-29c5-4820-8936-2203cfa34345\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.031445 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbtx4\" (UniqueName: \"kubernetes.io/projected/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-kube-api-access-sbtx4\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.031895 5055 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.031946 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert podName:5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5 nodeName:}" failed. No retries permitted until 2025-10-11 07:11:46.531928689 +0000 UTC m=+1090.306202496 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert") pod "infra-operator-controller-manager-656bcbd775-rk8j7" (UID: "5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5") : secret "infra-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.041317 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.046020 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.059919 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.062996 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-x995k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.080286 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b9k8\" (UniqueName: \"kubernetes.io/projected/c6fd3cbf-29c5-4820-8936-2203cfa34345-kube-api-access-5b9k8\") pod \"horizon-operator-controller-manager-7ffbcb7588-5zhf2\" (UID: \"c6fd3cbf-29c5-4820-8936-2203cfa34345\") " pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.107493 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.110630 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.111487 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbtx4\" (UniqueName: \"kubernetes.io/projected/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-kube-api-access-sbtx4\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.137745 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gvfr\" (UniqueName: \"kubernetes.io/projected/a711f3ae-fd01-49ab-8349-18c12be42a20-kube-api-access-5gvfr\") pod \"ironic-operator-controller-manager-9c5c78d49-jsbhj\" (UID: \"a711f3ae-fd01-49ab-8349-18c12be42a20\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.138132 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9w5f\" (UniqueName: \"kubernetes.io/projected/d62acace-7cb6-44e0-8b54-761a9cbd9cfe-kube-api-access-r9w5f\") pod \"keystone-operator-controller-manager-55b6b7c7b8-nmq2v\" (UID: \"d62acace-7cb6-44e0-8b54-761a9cbd9cfe\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.138190 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l75b6\" (UniqueName: \"kubernetes.io/projected/fc32b680-c849-4967-9a78-49c724018aa5-kube-api-access-l75b6\") pod \"manila-operator-controller-manager-5f67fbc655-9qdps\" (UID: \"fc32b680-c849-4967-9a78-49c724018aa5\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.167845 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.169376 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.192621 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-swtnq" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.200997 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.203734 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.217868 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-ksj9s" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.240913 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l75b6\" (UniqueName: \"kubernetes.io/projected/fc32b680-c849-4967-9a78-49c724018aa5-kube-api-access-l75b6\") pod \"manila-operator-controller-manager-5f67fbc655-9qdps\" (UID: \"fc32b680-c849-4967-9a78-49c724018aa5\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.241023 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gvfr\" (UniqueName: \"kubernetes.io/projected/a711f3ae-fd01-49ab-8349-18c12be42a20-kube-api-access-5gvfr\") pod \"ironic-operator-controller-manager-9c5c78d49-jsbhj\" (UID: \"a711f3ae-fd01-49ab-8349-18c12be42a20\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.241079 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9w5f\" (UniqueName: \"kubernetes.io/projected/d62acace-7cb6-44e0-8b54-761a9cbd9cfe-kube-api-access-r9w5f\") pod \"keystone-operator-controller-manager-55b6b7c7b8-nmq2v\" (UID: \"d62acace-7cb6-44e0-8b54-761a9cbd9cfe\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.246089 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-54hjb"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.247380 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.261343 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-7f57r" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.273126 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l75b6\" (UniqueName: \"kubernetes.io/projected/fc32b680-c849-4967-9a78-49c724018aa5-kube-api-access-l75b6\") pod \"manila-operator-controller-manager-5f67fbc655-9qdps\" (UID: \"fc32b680-c849-4967-9a78-49c724018aa5\") " pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.277554 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.279421 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gvfr\" (UniqueName: \"kubernetes.io/projected/a711f3ae-fd01-49ab-8349-18c12be42a20-kube-api-access-5gvfr\") pod \"ironic-operator-controller-manager-9c5c78d49-jsbhj\" (UID: \"a711f3ae-fd01-49ab-8349-18c12be42a20\") " pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.279713 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.280053 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9w5f\" (UniqueName: \"kubernetes.io/projected/d62acace-7cb6-44e0-8b54-761a9cbd9cfe-kube-api-access-r9w5f\") pod \"keystone-operator-controller-manager-55b6b7c7b8-nmq2v\" (UID: \"d62acace-7cb6-44e0-8b54-761a9cbd9cfe\") " pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.282102 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-dr74f" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.293321 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.301484 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.312070 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-54hjb"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.320285 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.340518 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.341864 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.342985 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djc9z\" (UniqueName: \"kubernetes.io/projected/8e44a30e-0e4b-4560-b22a-755a8ae3af75-kube-api-access-djc9z\") pod \"neutron-operator-controller-manager-79d585cb66-p6cwz\" (UID: \"8e44a30e-0e4b-4560-b22a-755a8ae3af75\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.343121 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdr2r\" (UniqueName: \"kubernetes.io/projected/19a5d205-97f5-41fa-a9df-578a2c60549f-kube-api-access-jdr2r\") pod \"mariadb-operator-controller-manager-f9fb45f8f-qhkjb\" (UID: \"19a5d205-97f5-41fa-a9df-578a2c60549f\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.348354 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-z5526" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.351378 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.352547 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.356662 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.357253 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-g5g4h" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.367301 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.409836 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.411748 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.432252 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-cl68t" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.445913 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzgcs\" (UniqueName: \"kubernetes.io/projected/2e808412-35a4-431c-8ea1-96e0730a48bb-kube-api-access-xzgcs\") pod \"nova-operator-controller-manager-5df598886f-54hjb\" (UID: \"2e808412-35a4-431c-8ea1-96e0730a48bb\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.445958 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq762\" (UniqueName: \"kubernetes.io/projected/ae29373a-08c3-40d4-8f74-b92069a970bf-kube-api-access-xq762\") pod \"octavia-operator-controller-manager-69fdcfc5f5-spd9v\" (UID: \"ae29373a-08c3-40d4-8f74-b92069a970bf\") " pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.445980 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k59qd\" (UniqueName: \"kubernetes.io/projected/4b9b0bd4-6cb3-4970-823f-c942d9567b64-kube-api-access-k59qd\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.446018 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djc9z\" (UniqueName: \"kubernetes.io/projected/8e44a30e-0e4b-4560-b22a-755a8ae3af75-kube-api-access-djc9z\") pod \"neutron-operator-controller-manager-79d585cb66-p6cwz\" (UID: \"8e44a30e-0e4b-4560-b22a-755a8ae3af75\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.446060 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdr2r\" (UniqueName: \"kubernetes.io/projected/19a5d205-97f5-41fa-a9df-578a2c60549f-kube-api-access-jdr2r\") pod \"mariadb-operator-controller-manager-f9fb45f8f-qhkjb\" (UID: \"19a5d205-97f5-41fa-a9df-578a2c60549f\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.446083 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrfkz\" (UniqueName: \"kubernetes.io/projected/d3bd380a-8fb6-4323-a11d-85b5c5123276-kube-api-access-rrfkz\") pod \"ovn-operator-controller-manager-79df5fb58c-lw5s2\" (UID: \"d3bd380a-8fb6-4323-a11d-85b5c5123276\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.446115 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.446665 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.457465 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.470487 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.472150 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djc9z\" (UniqueName: \"kubernetes.io/projected/8e44a30e-0e4b-4560-b22a-755a8ae3af75-kube-api-access-djc9z\") pod \"neutron-operator-controller-manager-79d585cb66-p6cwz\" (UID: \"8e44a30e-0e4b-4560-b22a-755a8ae3af75\") " pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.474291 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdr2r\" (UniqueName: \"kubernetes.io/projected/19a5d205-97f5-41fa-a9df-578a2c60549f-kube-api-access-jdr2r\") pod \"mariadb-operator-controller-manager-f9fb45f8f-qhkjb\" (UID: \"19a5d205-97f5-41fa-a9df-578a2c60549f\") " pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.480076 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.481469 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.492389 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-6gk9q" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.502455 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.502927 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.505887 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.508600 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-brxz6"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.510223 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.513212 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-gqg6k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.513552 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xj9mh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.517205 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.528442 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.541520 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.545386 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547316 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547376 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrfkz\" (UniqueName: \"kubernetes.io/projected/d3bd380a-8fb6-4323-a11d-85b5c5123276-kube-api-access-rrfkz\") pod \"ovn-operator-controller-manager-79df5fb58c-lw5s2\" (UID: \"d3bd380a-8fb6-4323-a11d-85b5c5123276\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547430 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzgcs\" (UniqueName: \"kubernetes.io/projected/2e808412-35a4-431c-8ea1-96e0730a48bb-kube-api-access-xzgcs\") pod \"nova-operator-controller-manager-5df598886f-54hjb\" (UID: \"2e808412-35a4-431c-8ea1-96e0730a48bb\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547504 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq762\" (UniqueName: \"kubernetes.io/projected/ae29373a-08c3-40d4-8f74-b92069a970bf-kube-api-access-xq762\") pod \"octavia-operator-controller-manager-69fdcfc5f5-spd9v\" (UID: \"ae29373a-08c3-40d4-8f74-b92069a970bf\") " 
pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547524 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k59qd\" (UniqueName: \"kubernetes.io/projected/4b9b0bd4-6cb3-4970-823f-c942d9567b64-kube-api-access-k59qd\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.547555 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92lc4\" (UniqueName: \"kubernetes.io/projected/a7abb018-37e9-4d57-90bf-e6333f7d252c-kube-api-access-92lc4\") pod \"placement-operator-controller-manager-68b6c87b68-xwbdv\" (UID: \"a7abb018-37e9-4d57-90bf-e6333f7d252c\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.547740 5055 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.547826 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert podName:5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5 nodeName:}" failed. No retries permitted until 2025-10-11 07:11:47.547810163 +0000 UTC m=+1091.322083970 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert") pod "infra-operator-controller-manager-656bcbd775-rk8j7" (UID: "5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5") : secret "infra-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.548334 5055 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.548360 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert podName:4b9b0bd4-6cb3-4970-823f-c942d9567b64 nodeName:}" failed. No retries permitted until 2025-10-11 07:11:47.048352868 +0000 UTC m=+1090.822626675 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert") pod "openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" (UID: "4b9b0bd4-6cb3-4970-823f-c942d9567b64") : secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.556740 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.569341 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.574091 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-brxz6"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.574129 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.574238 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.582108 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-nmrmr" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.588231 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq762\" (UniqueName: \"kubernetes.io/projected/ae29373a-08c3-40d4-8f74-b92069a970bf-kube-api-access-xq762\") pod \"octavia-operator-controller-manager-69fdcfc5f5-spd9v\" (UID: \"ae29373a-08c3-40d4-8f74-b92069a970bf\") " pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.588586 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrfkz\" (UniqueName: \"kubernetes.io/projected/d3bd380a-8fb6-4323-a11d-85b5c5123276-kube-api-access-rrfkz\") pod \"ovn-operator-controller-manager-79df5fb58c-lw5s2\" (UID: \"d3bd380a-8fb6-4323-a11d-85b5c5123276\") " pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.593259 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzgcs\" (UniqueName: \"kubernetes.io/projected/2e808412-35a4-431c-8ea1-96e0730a48bb-kube-api-access-xzgcs\") pod \"nova-operator-controller-manager-5df598886f-54hjb\" (UID: \"2e808412-35a4-431c-8ea1-96e0730a48bb\") " pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.593626 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k59qd\" (UniqueName: \"kubernetes.io/projected/4b9b0bd4-6cb3-4970-823f-c942d9567b64-kube-api-access-k59qd\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.628210 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.632325 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.646208 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.650054 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp2xg\" (UniqueName: \"kubernetes.io/projected/86f57e52-de78-48ca-86ec-69286b53726c-kube-api-access-hp2xg\") pod \"telemetry-operator-controller-manager-67cfc6749b-wm2wn\" (UID: \"86f57e52-de78-48ca-86ec-69286b53726c\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.650107 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92lc4\" (UniqueName: \"kubernetes.io/projected/a7abb018-37e9-4d57-90bf-e6333f7d252c-kube-api-access-92lc4\") pod \"placement-operator-controller-manager-68b6c87b68-xwbdv\" (UID: \"a7abb018-37e9-4d57-90bf-e6333f7d252c\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.650126 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls4gm\" (UniqueName: \"kubernetes.io/projected/83d0866a-b853-4873-bf7e-8000c2f7a63a-kube-api-access-ls4gm\") pod \"swift-operator-controller-manager-db6d7f97b-lqf4k\" (UID: \"83d0866a-b853-4873-bf7e-8000c2f7a63a\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.650198 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gt2h\" (UniqueName: \"kubernetes.io/projected/587a08b9-a65b-43fc-9d11-80568dc93742-kube-api-access-6gt2h\") pod \"test-operator-controller-manager-5458f77c4-brxz6\" (UID: \"587a08b9-a65b-43fc-9d11-80568dc93742\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.651588 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rsckx" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.652482 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.673392 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.681509 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92lc4\" (UniqueName: \"kubernetes.io/projected/a7abb018-37e9-4d57-90bf-e6333f7d252c-kube-api-access-92lc4\") pod \"placement-operator-controller-manager-68b6c87b68-xwbdv\" (UID: \"a7abb018-37e9-4d57-90bf-e6333f7d252c\") " pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.729117 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.741271 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.742606 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.743786 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.748839 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-8vlm8" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.750216 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752059 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls4gm\" (UniqueName: \"kubernetes.io/projected/83d0866a-b853-4873-bf7e-8000c2f7a63a-kube-api-access-ls4gm\") pod \"swift-operator-controller-manager-db6d7f97b-lqf4k\" (UID: \"83d0866a-b853-4873-bf7e-8000c2f7a63a\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752177 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwmmm\" (UniqueName: \"kubernetes.io/projected/a027701a-75f2-4640-a841-11dab6a3d4f9-kube-api-access-wwmmm\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752210 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gt2h\" (UniqueName: \"kubernetes.io/projected/587a08b9-a65b-43fc-9d11-80568dc93742-kube-api-access-6gt2h\") pod \"test-operator-controller-manager-5458f77c4-brxz6\" (UID: \"587a08b9-a65b-43fc-9d11-80568dc93742\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752278 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt9nm\" (UniqueName: \"kubernetes.io/projected/8cac90ea-fff7-4955-89bf-984d1c0f3094-kube-api-access-mt9nm\") pod \"watcher-operator-controller-manager-7f554bff7b-lf2lh\" (UID: \"8cac90ea-fff7-4955-89bf-984d1c0f3094\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752315 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp2xg\" (UniqueName: \"kubernetes.io/projected/86f57e52-de78-48ca-86ec-69286b53726c-kube-api-access-hp2xg\") pod \"telemetry-operator-controller-manager-67cfc6749b-wm2wn\" (UID: \"86f57e52-de78-48ca-86ec-69286b53726c\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.752353 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert\") pod 
\"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.780029 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls4gm\" (UniqueName: \"kubernetes.io/projected/83d0866a-b853-4873-bf7e-8000c2f7a63a-kube-api-access-ls4gm\") pod \"swift-operator-controller-manager-db6d7f97b-lqf4k\" (UID: \"83d0866a-b853-4873-bf7e-8000c2f7a63a\") " pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.789712 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.792853 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp2xg\" (UniqueName: \"kubernetes.io/projected/86f57e52-de78-48ca-86ec-69286b53726c-kube-api-access-hp2xg\") pod \"telemetry-operator-controller-manager-67cfc6749b-wm2wn\" (UID: \"86f57e52-de78-48ca-86ec-69286b53726c\") " pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.798421 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gt2h\" (UniqueName: \"kubernetes.io/projected/587a08b9-a65b-43fc-9d11-80568dc93742-kube-api-access-6gt2h\") pod \"test-operator-controller-manager-5458f77c4-brxz6\" (UID: \"587a08b9-a65b-43fc-9d11-80568dc93742\") " pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.844930 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.861399 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwmmm\" (UniqueName: \"kubernetes.io/projected/a027701a-75f2-4640-a841-11dab6a3d4f9-kube-api-access-wwmmm\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.861568 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt9nm\" (UniqueName: \"kubernetes.io/projected/8cac90ea-fff7-4955-89bf-984d1c0f3094-kube-api-access-mt9nm\") pod \"watcher-operator-controller-manager-7f554bff7b-lf2lh\" (UID: \"8cac90ea-fff7-4955-89bf-984d1c0f3094\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.861637 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.861848 5055 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: E1011 07:11:46.861951 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert podName:a027701a-75f2-4640-a841-11dab6a3d4f9 nodeName:}" failed. No retries permitted until 2025-10-11 07:11:47.361930618 +0000 UTC m=+1091.136204425 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert") pod "openstack-operator-controller-manager-5b95c8954b-kwb47" (UID: "a027701a-75f2-4640-a841-11dab6a3d4f9") : secret "webhook-server-cert" not found Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.862088 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.861845 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d9dz\" (UniqueName: \"kubernetes.io/projected/c06ae024-6af2-4435-b90b-d8c16d62aaf9-kube-api-access-2d9dz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9\" (UID: \"c06ae024-6af2-4435-b90b-d8c16d62aaf9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.881071 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.888267 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.891453 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt9nm\" (UniqueName: \"kubernetes.io/projected/8cac90ea-fff7-4955-89bf-984d1c0f3094-kube-api-access-mt9nm\") pod \"watcher-operator-controller-manager-7f554bff7b-lf2lh\" (UID: \"8cac90ea-fff7-4955-89bf-984d1c0f3094\") " pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.891611 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwmmm\" (UniqueName: \"kubernetes.io/projected/a027701a-75f2-4640-a841-11dab6a3d4f9-kube-api-access-wwmmm\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.894461 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb"] Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.932514 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.964850 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d9dz\" (UniqueName: \"kubernetes.io/projected/c06ae024-6af2-4435-b90b-d8c16d62aaf9-kube-api-access-2d9dz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9\" (UID: \"c06ae024-6af2-4435-b90b-d8c16d62aaf9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" Oct 11 07:11:46 crc kubenswrapper[5055]: I1011 07:11:46.987678 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.004984 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d9dz\" (UniqueName: \"kubernetes.io/projected/c06ae024-6af2-4435-b90b-d8c16d62aaf9-kube-api-access-2d9dz\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9\" (UID: \"c06ae024-6af2-4435-b90b-d8c16d62aaf9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.009240 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.045166 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" event={"ID":"b3d55b74-0882-459a-be9a-2da659337819","Type":"ContainerStarted","Data":"59e6f5bbaaa3fc0887155c34fb7d7b69c67f57cfc3810ba9226c43b110a3a281"} Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.066019 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:47 crc 
kubenswrapper[5055]: I1011 07:11:47.071125 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b9b0bd4-6cb3-4970-823f-c942d9567b64-cert\") pod \"openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn\" (UID: \"4b9b0bd4-6cb3-4970-823f-c942d9567b64\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.081317 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.199181 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.206067 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.213853 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.301052 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.310157 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.371571 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:47 crc kubenswrapper[5055]: E1011 07:11:47.371699 5055 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 11 07:11:47 crc kubenswrapper[5055]: E1011 07:11:47.371748 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert podName:a027701a-75f2-4640-a841-11dab6a3d4f9 nodeName:}" failed. No retries permitted until 2025-10-11 07:11:48.371732249 +0000 UTC m=+1092.146006056 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert") pod "openstack-operator-controller-manager-5b95c8954b-kwb47" (UID: "a027701a-75f2-4640-a841-11dab6a3d4f9") : secret "webhook-server-cert" not found Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.568401 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.576466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.585288 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5-cert\") pod \"infra-operator-controller-manager-656bcbd775-rk8j7\" (UID: \"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5\") " pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.685152 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.831440 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.840109 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.862549 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.946801 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.954446 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v"] Oct 11 07:11:47 crc kubenswrapper[5055]: I1011 07:11:47.959061 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k"] Oct 11 07:11:47 crc kubenswrapper[5055]: W1011 07:11:47.961721 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae29373a_08c3_40d4_8f74_b92069a970bf.slice/crio-4e26fd5abf6a53500086a38142a1b556274a5f5b178fa0a27dffaa72f4f0daec WatchSource:0}: Error finding container 4e26fd5abf6a53500086a38142a1b556274a5f5b178fa0a27dffaa72f4f0daec: Status 404 returned error can't find the container with id 4e26fd5abf6a53500086a38142a1b556274a5f5b178fa0a27dffaa72f4f0daec Oct 11 07:11:47 crc kubenswrapper[5055]: W1011 07:11:47.962896 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83d0866a_b853_4873_bf7e_8000c2f7a63a.slice/crio-2a3442a5d39951047fd3a4bc6bb8ca91a2521ad5e50fc79dfb30883076dbb5dd 
WatchSource:0}: Error finding container 2a3442a5d39951047fd3a4bc6bb8ca91a2521ad5e50fc79dfb30883076dbb5dd: Status 404 returned error can't find the container with id 2a3442a5d39951047fd3a4bc6bb8ca91a2521ad5e50fc79dfb30883076dbb5dd Oct 11 07:11:47 crc kubenswrapper[5055]: W1011 07:11:47.969923 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda7abb018_37e9_4d57_90bf_e6333f7d252c.slice/crio-50a1e8f9ff81188852c3ff34aa186a2373ed21bd5f73aff33a58d30f397ec9bd WatchSource:0}: Error finding container 50a1e8f9ff81188852c3ff34aa186a2373ed21bd5f73aff33a58d30f397ec9bd: Status 404 returned error can't find the container with id 50a1e8f9ff81188852c3ff34aa186a2373ed21bd5f73aff33a58d30f397ec9bd Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.173359 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" event={"ID":"ae29373a-08c3-40d4-8f74-b92069a970bf","Type":"ContainerStarted","Data":"4e26fd5abf6a53500086a38142a1b556274a5f5b178fa0a27dffaa72f4f0daec"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.188276 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.203505 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" event={"ID":"a711f3ae-fd01-49ab-8349-18c12be42a20","Type":"ContainerStarted","Data":"ecbd5db9ca613594d186308ecb2104c85335e35a2915da81100b1a072d60a2d6"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.242514 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.250956 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" event={"ID":"83d0866a-b853-4873-bf7e-8000c2f7a63a","Type":"ContainerStarted","Data":"2a3442a5d39951047fd3a4bc6bb8ca91a2521ad5e50fc79dfb30883076dbb5dd"} Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.251340 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6gt2h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5458f77c4-brxz6_openstack-operators(587a08b9-a65b-43fc-9d11-80568dc93742): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.251374 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:b2e9acf568a48c28cf2aed6012e432eeeb7d5f0eb11878fc91b62bc34cba10cd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xzgcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-5df598886f-54hjb_openstack-operators(2e808412-35a4-431c-8ea1-96e0730a48bb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.251626 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-5df598886f-54hjb"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.295042 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5458f77c4-brxz6"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.295075 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.352009 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" event={"ID":"86f57e52-de78-48ca-86ec-69286b53726c","Type":"ContainerStarted","Data":"6e6971324eda95759560f156ee8ceb48a6d9b6881a64a1def7abb027b697bcd4"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.352667 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn"] Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.363940 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2d9dz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9_openstack-operators(c06ae024-6af2-4435-b90b-d8c16d62aaf9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.370555 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" podUID="c06ae024-6af2-4435-b90b-d8c16d62aaf9" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.374947 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" event={"ID":"a7abb018-37e9-4d57-90bf-e6333f7d252c","Type":"ContainerStarted","Data":"50a1e8f9ff81188852c3ff34aa186a2373ed21bd5f73aff33a58d30f397ec9bd"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.386350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" event={"ID":"19a5d205-97f5-41fa-a9df-578a2c60549f","Type":"ContainerStarted","Data":"ffac2ffa71ffe8e2240cccf6b3d6e9833043af56c69cf8390025c50268078101"} Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.387980 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent@sha256:03b4f3db4b373515f7e4095984b97197c05a14f87b2a0a525eb5d7be1d7bda66,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner@sha256:6722a752fb7cbffbae811f6ad6567120fbd4ebbe8c38a83ec2df02850a3276bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api@sha256:2115452234aedb505ed4efc6cd9b9a4ce3b9809aa7d0128d8fbeeee84dad1a69,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator@sha256:50597a8eaa6c4383f357574dcab8358b698729797b4156d932985a08ab86b7cd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener@sha256:cb4997d62c7b2534233a676cb92e19cf85dda07e2fb9fa642c28aab30489f69a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier@sha256:1ccbf3f6cf24c9ee91bed71467491e22b8cb4b95bce90250f4174fae936b0fa1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24@sha256:e91d58021b54c46883595ff66be65882de54abdb3be2ca53c4162b20d18b5f48,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:cbe345acb37e57986ecf6685d28c72d0e639bdb493a18e9d3ba947d6c3a16384,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener@sha256:e7dcc3bf23d5e0393ac173e3c43d4ae85f4613a4fd16b3c147dc32ae491d49bf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker@sha256:2a1a8b582c6e4cc31081bd8b0887acf45e31c1d14596c4e361d27d08fef0debf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:86daeb9c834bfcedb533086dff59a6b5b6e832b94ce2a9116337f8736bb80032,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute@sha256:5d4fdf424fad33a3650163e9e7423f92e97de3305508c2b7c6435822e0313189,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi@sha256:6d28de018f6e1672e775a75735e3bc16b63da41acd8fb5196ee0b06856c07133,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter@sha256:7211a617ec657701ca819aa0ba28e1d5750f5bf2c1391b755cc4a48cc360b0fa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:c5fc9b72fc593bcf3b569c7ed24a256448eb1afab1504e668a3822e978be1306,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core@sha2
56:09b5017c95d7697e66b9c64846bc48ef5826a009cba89b956ec54561e5f4a2d1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup@sha256:88b99249f15470f359fb554f7f3a56974b743f4655e3f0c982c0260f75a67697,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler@sha256:e861d66785047d39eb68d9bac23e3f57ac84d9bd95593502d9b3b913b99fd1a4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume@sha256:b95f09bf3d259f9eacf3b63931977483f5c3c332f49b95ee8a69d8e3fb71d082,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api@sha256:6fc7801c0d18d41b9f11484b1cdb342de9cebd93072ec2205dbe40945715184f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9@sha256:d4d824b80cbed683543d9e8c7045ac97e080774f45a5067ccbca26404e067821,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central@sha256:182ec75938d8d3fb7d8f916373368add24062fec90489aa57776a81d0b36ea20,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns@sha256:9507ba5ab74cbae902e2dc07f89c7b3b5b76d8079e444365fe0eee6000fd7aaa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer@sha256:17db080dcc4099f8a20aa0f238b6bca5c104672ae46743adeab9d1637725ecaa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound@sha256:fd55cf3d73bfdc518419c9ba0b0cbef275140ae2d3bd0342a7310f81d57c2d78,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker@sha256:d164a9bd383f50df69fc22e7422f4650cd5076c90ed19278fc0f04e54345a63d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr@sha256:6beffe7d0bd75f9d1f495aeb7ab2334a2414af2c581d4833363df8441ed01018,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid@sha256:261e76f60c6bc6b172dc3608504552c63e83358a4fa3c0952a671544d83aa83f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler@sha256:581b65b646301e0fcb07582150ba63438f1353a85bf9acf1eb2acb4ce71c58bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron@sha256:2308c7b6c3d0aabbadfc9a06d84d67d2243f27fe8eed740ee96b1ce910203f62,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd@sha256:02d33f59749441cd5751c319e9d7cff97ab1004844c0e992650d340c6e8fbf43,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/opensta
ck-neutron-dhcp-agent@sha256:9cf0ca292340f1f978603955ef682effbf24316d6e2376b1c89906d84c3f06d0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:c3e651f35b930bcf1a3084be8910c2f3f34d22a976c5379cf518a68d9994bfa7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent@sha256:58f678016d7f6c8fe579abe886fd138ef853642faa6766ca60639feac12d82ac,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent@sha256:46f92909153aaf03a585374b77d103c536509747e3270558d9a533295c46a7c5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent@sha256:7fe367f51638c5c302fd3f8e66a31b09cb3b11519a7f72ef142b6c6fe8b91694,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:9ebf424d4107275a2e3f21f7a18ef257ff2f97c1298109ac7c802a5a4f4794f2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api@sha256:4fcbe0d9a3c845708ecc32102ad4abbcbd947d87e5cf91f186de75b5d84ec681,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn@sha256:58a4e9a4dea86635c93ce37a2bb3c60ece62b3d656f6ee6a8845347cbb3e90fd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine@sha256:6f2b843bc9f4ceb1ee873972d69e6bae6e1dbd378b486995bc3697d8bcff6339,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon@sha256:03b4bb79b71d5ca7792d19c4c0ee08a5e5a407ad844c087305c42dd909ee7490,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached@sha256:773daada6402d9cad089cdc809d6c0335456d057ac1a25441ab5d82add2f70f4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis@sha256:7323406a63fb3fdbb3eea4da0f7e8ed89c94c9bd0ad5ecd6c18fa4a4c2c550c4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api@sha256:7ae82068011e2d2e5ddc88c943fd32ff4a11902793e7a1df729811b2e27122a0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor@sha256:0c762c15d9d98d39cc9dc3d1f9a70f9188fef58d4e2f3b0c69c896cab8da5e48,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector@sha256:febf65561eeef5b36b70d0d65ee83f6451e43ec97bfab4d826e14215da6ff19b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-ante
lope-centos9/openstack-ironic-neutron-agent@sha256:b8aadfc3d547c5ef1e27fcb573d4760cf8c2f2271eefe1793c35a0d46b640837,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe@sha256:ecc91fd5079ee6d0c6ae1b11e97da790e33864d0e1930e574f959da2bddfa59a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent@sha256:2e981e93f99c929a3f04e5e41c8f645d44d390a9aeee3c5193cce7ec2edcbf3a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone@sha256:1e5714637b6e1a24c2858fe6d9bbb3f00bc61d69ad74a657b1c23682bf4cb2b7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api@sha256:35b8dcf27dc3b67f3840fa0e693ff312f74f7e22c634dff206a5c4d0133c716c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler@sha256:e109e4863e05e803dbfe04917756fd52231c560c65353170a2000be6cc2bb53d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share@sha256:6df0bebd9318ce11624413249e7e9781311638f276f8877668d3b382fe90e62f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:56b75d97f4a48c8cf58b3a7c18c43618efb308bf0188124f6301142e61299b0c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils@sha256:a51ed62767206067aa501142dbf01f20b3d65325d30faf1b4d6424d5b17dfba5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api@sha256:592e3cd32d3cc97a69093ad905b449aa374ffbb1b2644b738bb6c1434476d1f6,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute@sha256:5f179b847f2dc32d9110b8f2be9fe65f1aeada1e18105dffdaf052981215d844,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor@sha256:9596452e283febbe08204d0ef0fd1992af3395d0969f7ac76663ed7c8be5b4d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy@sha256:d61005a10bef1b37762a8a41e6755c1169241e36cc5f92886bca6f4f6b9c381a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler@sha256:e6a4335bcbeed3cd3e73ac879f754e314761e4a417a67539ca88e96a79346328,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api@sha256:97d88fc53421b699fc91983313d7beec4a0f177089e95bdf5ba15c3f521db9a9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-
health-manager@sha256:5365e5c9c3ad2ede1b6945255b2cc6b009d642c39babdf25e0655282cfa646fe,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping@sha256:5b55795d774e0ea160ff8a7fd491ed41cf2d93c7d821694abb3a879eaffcefeb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog@sha256:26e955c46a6063eafcfeb79430bf3d9268dbe95687c00e63a624b3ec5a846f5a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker@sha256:58939baa18ab09e2b24996c5f3665ae52274b781f661ea06a67c991e9a832d5a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:b8bff6857fec93c3c1521f1a8c23de21bcb86fc0f960972e81f6c3f95d4185be,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather@sha256:943eee724277e252795909137538a553ef5284c8103ad01b9be7b0138c66d14d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695fec7520a954ccbc7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi@sha256:d97b08fd421065c8c33a523973822ac468500cbe853069aa9214393fbda7a908,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:d76f7d6620930cc2e9ac070492bbeb525f83ce5ff4947463e3784bf1ce04a857,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base@sha256:289dea3beea1cd4405895fc42e44372b35e4a941e31c59e102c333471a3ca9b7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:9b19894fa67a81bf8ba4159b55b49f38877c670aeb97e2021c341cef2a9294e4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:ea164961ad30453ad0301c6b73364e1f1024f689634c88dd98265f9c7048e31d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server@sha256:6f9f2ea45f0271f6da8eb05a5f74cf5ce6769479346f5c2f407ee6f31a9c7ff3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:59448516174fc3bab679b9a8dd62cb9a9d16b5734aadbeb98e960e3b7c79bd22,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:adcdeb8ecd601fb03c3b0901d5b5111af2ca48f7dd443e22224db6daaf08f5d0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account@sha256:2bf32d9b95899d7637dfe19d07cf1ecc9a06593984faff57a3c0dce060012edb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:7a452cd18b64d522e8a1e25bdcea543e9fe5f5b76e1c5e044c2b5334e06a326b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/
podified-antelope-centos9/openstack-swift-object@sha256:6a46aa13aa359b8e782a22d67db42db02bbf2bb7e35df4b684ac1daeda38cde3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:f6824854bea6b2acbb00c34639799b4744818d4adbdd40e37dc5088f9ae18d58,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all@sha256:a66d2fdc21f25c690f02e643d2666dbe7df43a64cd55086ec33d6755e6d809b9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api@sha256:30701a65382430570f6fb35621f64f1003f727b6da745ce84fb1a90436ee2350,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier@sha256:b9a657c51bbcc236e6c906a6df6c42cd2a28bab69e7ab58b0e9ced12295b2d87,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine@sha256:fd65fb5c9710c46aa1c31e65a51cd5c23ec35cf68c2452d421f919f2aa9b6255,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k59qd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn_openstack-operators(4b9b0bd4-6cb3-4970-823f-c942d9567b64): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.394974 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" 
event={"ID":"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a","Type":"ContainerStarted","Data":"710a15b7bb68eb486d3dfe21bb5e20fe75e071d74cf9118c3cbff3c45959d267"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.400676 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" event={"ID":"c6fd3cbf-29c5-4820-8936-2203cfa34345","Type":"ContainerStarted","Data":"ee03e1d7011b878b9070885706501da5297884474c7a0a25f44f26cd7adefde6"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.409388 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" event={"ID":"d0538072-f53e-4d2a-858e-667657ffd09e","Type":"ContainerStarted","Data":"ab025bb09724e86d61ab4aa2adcf5755e7e2fc6c769c79c6f9ea24d5d98d5145"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.416841 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" event={"ID":"fc32b680-c849-4967-9a78-49c724018aa5","Type":"ContainerStarted","Data":"47e38a5db6bd89674c1505eca22f8cd3021e14268554e70cfa908defe7144e95"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.419255 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7"] Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.419639 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" event={"ID":"ea4aab30-444b-4efe-a76d-bcd6c673fffe","Type":"ContainerStarted","Data":"64248b9fb64f7f302c0891f1fcc940af10c27947b1517be54101fa5ea9634e61"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.421823 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" event={"ID":"d3bd380a-8fb6-4323-a11d-85b5c5123276","Type":"ContainerStarted","Data":"c2c40d7c1639bd4f208aa355c0184945b818c8115b0b02a21492a69fad02d241"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.437155 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" event={"ID":"cb2cd76d-3803-48d6-8882-9b895be3c494","Type":"ContainerStarted","Data":"4b307f8e32e821dd420265217d43d6051dd07a71aa800e628eebc6b572618368"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.438483 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" event={"ID":"d62acace-7cb6-44e0-8b54-761a9cbd9cfe","Type":"ContainerStarted","Data":"b1387d2dbb136a392483eb3b8bf8c47b127d5a3f7a62b20d3dec6f4602ced394"} Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.439469 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.449882 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a027701a-75f2-4640-a841-11dab6a3d4f9-cert\") pod \"openstack-operator-controller-manager-5b95c8954b-kwb47\" (UID: \"a027701a-75f2-4640-a841-11dab6a3d4f9\") " 
pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.461114 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sbtx4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-656bcbd775-rk8j7_openstack-operators(5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.517019 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.610900 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" podUID="2e808412-35a4-431c-8ea1-96e0730a48bb" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.676267 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" podUID="4b9b0bd4-6cb3-4970-823f-c942d9567b64" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.702226 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" podUID="587a08b9-a65b-43fc-9d11-80568dc93742" Oct 11 07:11:48 crc kubenswrapper[5055]: E1011 07:11:48.775178 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" podUID="5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5" Oct 11 07:11:48 crc kubenswrapper[5055]: I1011 07:11:48.954047 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47"] Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.479414 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" event={"ID":"c06ae024-6af2-4435-b90b-d8c16d62aaf9","Type":"ContainerStarted","Data":"ce57314681fba078a19007f08486799cf40910a7d0f12c4d4f7ab2af25a9782e"} Oct 11 07:11:49 crc kubenswrapper[5055]: E1011 07:11:49.503555 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" podUID="c06ae024-6af2-4435-b90b-d8c16d62aaf9" Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.549546 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" event={"ID":"4b9b0bd4-6cb3-4970-823f-c942d9567b64","Type":"ContainerStarted","Data":"f4466d5a8bb4f8e3a3ab0ffb3b503a0a413a74340cf544675d82ba4da28e3cdd"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.549593 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" event={"ID":"4b9b0bd4-6cb3-4970-823f-c942d9567b64","Type":"ContainerStarted","Data":"7e7fde021fea49ac09f673c94bd691b1d0449c863d742797028be244f9658864"} Oct 11 07:11:49 crc kubenswrapper[5055]: E1011 07:11:49.580304 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" podUID="4b9b0bd4-6cb3-4970-823f-c942d9567b64" Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.583314 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" event={"ID":"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5","Type":"ContainerStarted","Data":"bb7e585da992b1f1eff529bd44d2fc9b14815f57741bbfa8a5d8a5eabe387d47"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.583355 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" event={"ID":"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5","Type":"ContainerStarted","Data":"41e25b1a2fc122a5e576cdf89245be037bc1f68687ee874dd1e217a4a0e668bc"} Oct 11 07:11:49 crc kubenswrapper[5055]: E1011 07:11:49.586500 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" podUID="5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5" Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.592414 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" event={"ID":"587a08b9-a65b-43fc-9d11-80568dc93742","Type":"ContainerStarted","Data":"277ddd6625cf89dfe9a59e12c06d0b8b5421d4e2f828ed7ea2bc8e2af5f0a1d9"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.592468 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" event={"ID":"587a08b9-a65b-43fc-9d11-80568dc93742","Type":"ContainerStarted","Data":"37b881ff363ba9f709d1190ab874dda3aa3ed0130c216466f657aa8018e0ee63"} Oct 11 07:11:49 crc kubenswrapper[5055]: E1011 07:11:49.594646 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" podUID="587a08b9-a65b-43fc-9d11-80568dc93742" Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.595774 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" event={"ID":"8cac90ea-fff7-4955-89bf-984d1c0f3094","Type":"ContainerStarted","Data":"ead1723630b7d325241633b3d2caed1c36372039cff131944f5e7af447813978"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.598425 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" event={"ID":"8e44a30e-0e4b-4560-b22a-755a8ae3af75","Type":"ContainerStarted","Data":"ed07b823324072c297e040c29f232a903f55a65cb43a0954655a4ab8e5dc16a8"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.601896 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" 
event={"ID":"2e808412-35a4-431c-8ea1-96e0730a48bb","Type":"ContainerStarted","Data":"2ceed94c4e43a817a452d597987ada9ff5ab7322cfbc783caf0bc133c6f0496d"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.601941 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" event={"ID":"2e808412-35a4-431c-8ea1-96e0730a48bb","Type":"ContainerStarted","Data":"29f945c9cf930a7c8c63fd67274a403fd9dfbe0f04655c918e376e99ae506aca"} Oct 11 07:11:49 crc kubenswrapper[5055]: E1011 07:11:49.608219 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:b2e9acf568a48c28cf2aed6012e432eeeb7d5f0eb11878fc91b62bc34cba10cd\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" podUID="2e808412-35a4-431c-8ea1-96e0730a48bb" Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.620561 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" event={"ID":"a027701a-75f2-4640-a841-11dab6a3d4f9","Type":"ContainerStarted","Data":"d4e7d783ed2a75c83c52696fa9f033d01b52b8dec0342dd976d7e6a02209c360"} Oct 11 07:11:49 crc kubenswrapper[5055]: I1011 07:11:49.620600 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" event={"ID":"a027701a-75f2-4640-a841-11dab6a3d4f9","Type":"ContainerStarted","Data":"26e9bc82767bac5473d7294d01a84421d41bdeeb40a2d82b7f0d4a630f981c30"} Oct 11 07:11:50 crc kubenswrapper[5055]: I1011 07:11:50.645453 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" event={"ID":"a027701a-75f2-4640-a841-11dab6a3d4f9","Type":"ContainerStarted","Data":"4bf6a1b8ce82c587a681b70887b8c5ffc0e24bd48c37dee42ed6148f6d7d8571"} Oct 11 07:11:50 crc kubenswrapper[5055]: I1011 07:11:50.646459 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:11:50 crc kubenswrapper[5055]: E1011 07:11:50.649530 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:5cfb2ae1092445950b39dd59caa9a8c9367f42fb8353a8c3848d3bc729f24492\\\"\"" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" podUID="5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5" Oct 11 07:11:50 crc kubenswrapper[5055]: E1011 07:11:50.649974 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:b2e9acf568a48c28cf2aed6012e432eeeb7d5f0eb11878fc91b62bc34cba10cd\\\"\"" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" podUID="2e808412-35a4-431c-8ea1-96e0730a48bb" Oct 11 07:11:50 crc kubenswrapper[5055]: E1011 07:11:50.650017 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:7e584b1c430441c8b6591dadeff32e065de8a185ad37ef90d2e08d37e59aab4a\\\"\"" 
pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" podUID="587a08b9-a65b-43fc-9d11-80568dc93742" Oct 11 07:11:50 crc kubenswrapper[5055]: E1011 07:11:50.650354 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:a17fc270857869fd1efe5020b2a1cb8c2abbd838f08de88f3a6a59e8754ec351\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" podUID="4b9b0bd4-6cb3-4970-823f-c942d9567b64" Oct 11 07:11:50 crc kubenswrapper[5055]: E1011 07:11:50.657025 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" podUID="c06ae024-6af2-4435-b90b-d8c16d62aaf9" Oct 11 07:11:50 crc kubenswrapper[5055]: I1011 07:11:50.691226 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" podStartSLOduration=4.691207869 podStartE2EDuration="4.691207869s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:11:50.687039111 +0000 UTC m=+1094.461312918" watchObservedRunningTime="2025-10-11 07:11:50.691207869 +0000 UTC m=+1094.465481676" Oct 11 07:11:58 crc kubenswrapper[5055]: I1011 07:11:58.522902 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5b95c8954b-kwb47" Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.727620 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" event={"ID":"d0538072-f53e-4d2a-858e-667657ffd09e","Type":"ContainerStarted","Data":"72824cf0a5096dc2869e5b78d15a98c267407040e566910ccf1c6556207440d4"} Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.729748 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" event={"ID":"cb2cd76d-3803-48d6-8882-9b895be3c494","Type":"ContainerStarted","Data":"feb801917219fce7b3c5b952184868ab2cb26be24a08fd1947fbd319c6f38b7a"} Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.736035 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" event={"ID":"86f57e52-de78-48ca-86ec-69286b53726c","Type":"ContainerStarted","Data":"ed4f90295533b65b4fba1bd9621afb3eae7d9fda186f8a0bd92d0ee936d2de79"} Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.746775 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" event={"ID":"19a5d205-97f5-41fa-a9df-578a2c60549f","Type":"ContainerStarted","Data":"9159cc99c7d718f8eea6682885327c759123fa3c2bcff768aa5d9e374e352aaf"} Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.755112 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" 
event={"ID":"b3d55b74-0882-459a-be9a-2da659337819","Type":"ContainerStarted","Data":"10596b9b7892654e1855e19a5508da250a51ce2cf92d5f811e44147e2d52e83d"} Oct 11 07:12:00 crc kubenswrapper[5055]: I1011 07:12:00.758468 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" event={"ID":"fc32b680-c849-4967-9a78-49c724018aa5","Type":"ContainerStarted","Data":"3c66938a60dd753ccbb068bff3ef0ae3661e298a48a96f0eef950e9614bc529a"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.766509 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" event={"ID":"fc32b680-c849-4967-9a78-49c724018aa5","Type":"ContainerStarted","Data":"c70de82e3c2c0d45b673097ca68423414af1642db7d279ff92b4d5683e022811"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.767412 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.768425 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" event={"ID":"8e44a30e-0e4b-4560-b22a-755a8ae3af75","Type":"ContainerStarted","Data":"ea3e7de80acb3afb58f1787812bb8cf2d90a2e7f317e44887a933a34e3037666"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.774286 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" event={"ID":"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a","Type":"ContainerStarted","Data":"e2c771eae9d762d20e64d8ef046bdc878649b86368d5a868014d3afdd9d14704"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.774985 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.783103 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" event={"ID":"a7abb018-37e9-4d57-90bf-e6333f7d252c","Type":"ContainerStarted","Data":"97426f0111e636102bfe0474f06ec1a82ce1c9aa40da9b078615bf3ce55909c6"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.784749 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" event={"ID":"19a5d205-97f5-41fa-a9df-578a2c60549f","Type":"ContainerStarted","Data":"819f10999a7598e4b8f9f32a5d97a1ebfc4f288930ced21cf33cb863696e7ff2"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.785406 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.793433 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" event={"ID":"b3d55b74-0882-459a-be9a-2da659337819","Type":"ContainerStarted","Data":"4cf39662fad41d788a6a58dec98dad405adc5488cad853bef50647e18b404735"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.793581 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.797361 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" event={"ID":"8cac90ea-fff7-4955-89bf-984d1c0f3094","Type":"ContainerStarted","Data":"d473228d20b34be4b72eef676815e644f91d097bf6d44697730225a1731422f9"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.799595 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" event={"ID":"86f57e52-de78-48ca-86ec-69286b53726c","Type":"ContainerStarted","Data":"b6de24be8066f52eb4b576f280fde5af6ad8897a559b525f236fe01c3f8a87e4"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.800183 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.802360 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" event={"ID":"a711f3ae-fd01-49ab-8349-18c12be42a20","Type":"ContainerStarted","Data":"1f45137359dc7eead97342a887fdc2671efb8ab821bcc346b854886d89c86109"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.803875 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" event={"ID":"ea4aab30-444b-4efe-a76d-bcd6c673fffe","Type":"ContainerStarted","Data":"a58a6bd9f68b76eefd4d3d9d5756d896fdc7210321238cd25df124ace9d56724"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.811013 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" event={"ID":"83d0866a-b853-4873-bf7e-8000c2f7a63a","Type":"ContainerStarted","Data":"e99eb1cc8ca5a97f55bb222fe6a3cee86d516b2b84e5aa4629b34f8b058ca6dd"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.816387 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" event={"ID":"c6fd3cbf-29c5-4820-8936-2203cfa34345","Type":"ContainerStarted","Data":"44ae2badbee64a1eebb3aa34545862f1cebb3bf6d7a4143df879549b320ae24a"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.816422 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" event={"ID":"c6fd3cbf-29c5-4820-8936-2203cfa34345","Type":"ContainerStarted","Data":"84f567bb9e012b6674d2d79cb059585dcc5ed29d8acb9b4ddbbf98be45cd8cd4"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.816696 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.819324 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" event={"ID":"ae29373a-08c3-40d4-8f74-b92069a970bf","Type":"ContainerStarted","Data":"9dfe4e38111fde0b757e0e048c19a80e67645effc04efa788d903bd72174a849"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.819479 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.825157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" 
event={"ID":"d62acace-7cb6-44e0-8b54-761a9cbd9cfe","Type":"ContainerStarted","Data":"534cdce42c8a5e8666e68858a5bae31866f30a5c39959369d070e144b77911d4"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.826103 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" podStartSLOduration=4.093820428 podStartE2EDuration="16.826086619s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.443887883 +0000 UTC m=+1091.218161690" lastFinishedPulling="2025-10-11 07:12:00.176154074 +0000 UTC m=+1103.950427881" observedRunningTime="2025-10-11 07:12:01.792316052 +0000 UTC m=+1105.566589859" watchObservedRunningTime="2025-10-11 07:12:01.826086619 +0000 UTC m=+1105.600360426" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.826526 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" event={"ID":"d3bd380a-8fb6-4323-a11d-85b5c5123276","Type":"ContainerStarted","Data":"abe9c83bc58c1cfa3ac97246cb176dae4571efcb577127fd3cf6d6ebb1390bc0"} Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.829260 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" podStartSLOduration=3.738830472 podStartE2EDuration="16.829247148s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.081366764 +0000 UTC m=+1090.855640571" lastFinishedPulling="2025-10-11 07:12:00.17178344 +0000 UTC m=+1103.946057247" observedRunningTime="2025-10-11 07:12:01.824671849 +0000 UTC m=+1105.598945656" watchObservedRunningTime="2025-10-11 07:12:01.829247148 +0000 UTC m=+1105.603520955" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.858818 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" podStartSLOduration=4.059962781 podStartE2EDuration="16.858748224s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.360686078 +0000 UTC m=+1091.134959885" lastFinishedPulling="2025-10-11 07:12:00.159471521 +0000 UTC m=+1103.933745328" observedRunningTime="2025-10-11 07:12:01.848616537 +0000 UTC m=+1105.622890344" watchObservedRunningTime="2025-10-11 07:12:01.858748224 +0000 UTC m=+1105.633022031" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.872351 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" podStartSLOduration=4.62201903 podStartE2EDuration="16.872333249s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.963918084 +0000 UTC m=+1091.738191891" lastFinishedPulling="2025-10-11 07:12:00.214232303 +0000 UTC m=+1103.988506110" observedRunningTime="2025-10-11 07:12:01.871018582 +0000 UTC m=+1105.645292389" watchObservedRunningTime="2025-10-11 07:12:01.872333249 +0000 UTC m=+1105.646607056" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.904838 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" podStartSLOduration=3.544534861 podStartE2EDuration="16.90482126s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:46.775696737 +0000 UTC m=+1090.549970544" 
lastFinishedPulling="2025-10-11 07:12:00.135983136 +0000 UTC m=+1103.910256943" observedRunningTime="2025-10-11 07:12:01.902080492 +0000 UTC m=+1105.676354299" watchObservedRunningTime="2025-10-11 07:12:01.90482126 +0000 UTC m=+1105.679095067" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.928108 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" podStartSLOduration=3.649016846 podStartE2EDuration="15.928093359s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.892042698 +0000 UTC m=+1091.666316505" lastFinishedPulling="2025-10-11 07:12:00.171119211 +0000 UTC m=+1103.945393018" observedRunningTime="2025-10-11 07:12:01.92355559 +0000 UTC m=+1105.697829387" watchObservedRunningTime="2025-10-11 07:12:01.928093359 +0000 UTC m=+1105.702367166" Oct 11 07:12:01 crc kubenswrapper[5055]: I1011 07:12:01.950549 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" podStartSLOduration=4.043064447 podStartE2EDuration="16.950533725s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.257264872 +0000 UTC m=+1091.031538679" lastFinishedPulling="2025-10-11 07:12:00.16473415 +0000 UTC m=+1103.939007957" observedRunningTime="2025-10-11 07:12:01.946568562 +0000 UTC m=+1105.720842369" watchObservedRunningTime="2025-10-11 07:12:01.950533725 +0000 UTC m=+1105.724807532" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.422113 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.422543 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.839250 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" event={"ID":"a711f3ae-fd01-49ab-8349-18c12be42a20","Type":"ContainerStarted","Data":"04df230a68df8ee59120c083f2fe4a1d8d26595e8483a93329088b51b8a5c354"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.839398 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.844114 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" event={"ID":"8e18a79a-705e-467e-b7cd-0fc0a90f1a7a","Type":"ContainerStarted","Data":"1ad742a89ee90e2859cd4ab7899c583caf10d0de517321f731d6596b8f1df5e7"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.846143 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" 
event={"ID":"8cac90ea-fff7-4955-89bf-984d1c0f3094","Type":"ContainerStarted","Data":"b804e07b3ec1e6e2c23f397efd72a470496cafe5dbba3d9cdabfe9725cfa431f"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.846941 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.849474 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" event={"ID":"d0538072-f53e-4d2a-858e-667657ffd09e","Type":"ContainerStarted","Data":"fb0d3bc9133d7ed4135ddd0d01252164126192b9beb1be35c4498f568c8aba71"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.849760 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.855999 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" event={"ID":"ae29373a-08c3-40d4-8f74-b92069a970bf","Type":"ContainerStarted","Data":"fb78b00df2a082bbee4f39b394edc7c95f5d257f4b9eecb4cff82e9b230f1d51"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.857536 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" podStartSLOduration=5.260399989 podStartE2EDuration="17.857518481s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.589368775 +0000 UTC m=+1091.363642592" lastFinishedPulling="2025-10-11 07:12:00.186487257 +0000 UTC m=+1103.960761084" observedRunningTime="2025-10-11 07:12:02.853919319 +0000 UTC m=+1106.628193126" watchObservedRunningTime="2025-10-11 07:12:02.857518481 +0000 UTC m=+1106.631792288" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.861598 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" event={"ID":"8e44a30e-0e4b-4560-b22a-755a8ae3af75","Type":"ContainerStarted","Data":"b3d97e00a18caefe10a81a8e25f451473d1eafc3879ba7228500ff7911d620f8"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.861686 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.866699 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" event={"ID":"a7abb018-37e9-4d57-90bf-e6333f7d252c","Type":"ContainerStarted","Data":"7876b93d30d4e91e73b69dc8343ff1b591a03f64938304475583553692d91114"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.867209 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.873796 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" podStartSLOduration=5.048775368 podStartE2EDuration="16.873759611s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.35112818 +0000 UTC m=+1092.125401987" lastFinishedPulling="2025-10-11 07:12:00.176112423 +0000 UTC m=+1103.950386230" 
observedRunningTime="2025-10-11 07:12:02.869572943 +0000 UTC m=+1106.643846750" watchObservedRunningTime="2025-10-11 07:12:02.873759611 +0000 UTC m=+1106.648033418" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.875659 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" event={"ID":"d62acace-7cb6-44e0-8b54-761a9cbd9cfe","Type":"ContainerStarted","Data":"52d08c47dc725cf06c64196f224485401778ae91d8e0bfeff5f0b363478bc3ff"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.875795 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.877800 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" event={"ID":"ea4aab30-444b-4efe-a76d-bcd6c673fffe","Type":"ContainerStarted","Data":"ca7cc6d1442c9e31cef2555c66188c8e73b0da5bfaf00e8e08c93ddf37652ee4"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.877924 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.880444 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" event={"ID":"d3bd380a-8fb6-4323-a11d-85b5c5123276","Type":"ContainerStarted","Data":"66e5f634f60e3547e710c25c3dcd6eb6d29113ddcb18e801ab25820f23aad588"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.880595 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.895877 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" podStartSLOduration=4.9734641360000005 podStartE2EDuration="17.895859187s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.25435722 +0000 UTC m=+1091.028631037" lastFinishedPulling="2025-10-11 07:12:00.176752281 +0000 UTC m=+1103.951026088" observedRunningTime="2025-10-11 07:12:02.892450991 +0000 UTC m=+1106.666724818" watchObservedRunningTime="2025-10-11 07:12:02.895859187 +0000 UTC m=+1106.670132994" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.899604 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" event={"ID":"83d0866a-b853-4873-bf7e-8000c2f7a63a","Type":"ContainerStarted","Data":"2a01b255ae3c3a2a1270ee5808c9aafa0a52299c42aa8d41911a01b6fb65e4ea"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.899746 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.922949 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" event={"ID":"cb2cd76d-3803-48d6-8882-9b895be3c494","Type":"ContainerStarted","Data":"55eff78af8c933c803cc32470544df8a7b82d980bcffd5be9786a8eaf35d4e2e"} Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.946360 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" podStartSLOduration=4.902533878 podStartE2EDuration="17.946344198s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.108699664 +0000 UTC m=+1090.882973471" lastFinishedPulling="2025-10-11 07:12:00.152509984 +0000 UTC m=+1103.926783791" observedRunningTime="2025-10-11 07:12:02.9121941 +0000 UTC m=+1106.686467907" watchObservedRunningTime="2025-10-11 07:12:02.946344198 +0000 UTC m=+1106.720618005" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.949469 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" podStartSLOduration=5.66603755 podStartE2EDuration="17.949461456s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.888331283 +0000 UTC m=+1091.662605090" lastFinishedPulling="2025-10-11 07:12:00.171755189 +0000 UTC m=+1103.946028996" observedRunningTime="2025-10-11 07:12:02.929616734 +0000 UTC m=+1106.703890541" watchObservedRunningTime="2025-10-11 07:12:02.949461456 +0000 UTC m=+1106.723735263" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.951636 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" podStartSLOduration=4.681697509 podStartE2EDuration="16.951629338s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.926043326 +0000 UTC m=+1091.700317133" lastFinishedPulling="2025-10-11 07:12:00.195975155 +0000 UTC m=+1103.970248962" observedRunningTime="2025-10-11 07:12:02.943844537 +0000 UTC m=+1106.718118344" watchObservedRunningTime="2025-10-11 07:12:02.951629338 +0000 UTC m=+1106.725903145" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.970186 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" podStartSLOduration=4.758231427 podStartE2EDuration="16.970164903s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.996481322 +0000 UTC m=+1091.770755129" lastFinishedPulling="2025-10-11 07:12:00.208414768 +0000 UTC m=+1103.982688605" observedRunningTime="2025-10-11 07:12:02.96302129 +0000 UTC m=+1106.737295107" watchObservedRunningTime="2025-10-11 07:12:02.970164903 +0000 UTC m=+1106.744438720" Oct 11 07:12:02 crc kubenswrapper[5055]: I1011 07:12:02.976989 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" podStartSLOduration=5.929743334 podStartE2EDuration="17.976971426s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.224300164 +0000 UTC m=+1091.998573971" lastFinishedPulling="2025-10-11 07:12:00.271528256 +0000 UTC m=+1104.045802063" observedRunningTime="2025-10-11 07:12:02.975803472 +0000 UTC m=+1106.750077289" watchObservedRunningTime="2025-10-11 07:12:02.976971426 +0000 UTC m=+1106.751245223" Oct 11 07:12:03 crc kubenswrapper[5055]: I1011 07:12:03.004138 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" podStartSLOduration=4.748742932 podStartE2EDuration="17.004120335s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.965522789 +0000 
UTC m=+1091.739796596" lastFinishedPulling="2025-10-11 07:12:00.220900192 +0000 UTC m=+1103.995173999" observedRunningTime="2025-10-11 07:12:03.003356953 +0000 UTC m=+1106.777630760" watchObservedRunningTime="2025-10-11 07:12:03.004120335 +0000 UTC m=+1106.778394142" Oct 11 07:12:03 crc kubenswrapper[5055]: I1011 07:12:03.031512 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" podStartSLOduration=4.963909686 podStartE2EDuration="18.03149021s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:47.108500108 +0000 UTC m=+1090.882773915" lastFinishedPulling="2025-10-11 07:12:00.176080632 +0000 UTC m=+1103.950354439" observedRunningTime="2025-10-11 07:12:03.025579413 +0000 UTC m=+1106.799853220" watchObservedRunningTime="2025-10-11 07:12:03.03149021 +0000 UTC m=+1106.805764027" Oct 11 07:12:03 crc kubenswrapper[5055]: I1011 07:12:03.929930 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:12:04 crc kubenswrapper[5055]: I1011 07:12:04.936682 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" event={"ID":"587a08b9-a65b-43fc-9d11-80568dc93742","Type":"ContainerStarted","Data":"adba74d6b11ed912aee29f1c98576f0f0e0442526b31ac99f38ac2822546f29d"} Oct 11 07:12:04 crc kubenswrapper[5055]: I1011 07:12:04.937276 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:12:04 crc kubenswrapper[5055]: I1011 07:12:04.964839 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" podStartSLOduration=2.539194041 podStartE2EDuration="18.964821654s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.251152511 +0000 UTC m=+1092.025426318" lastFinishedPulling="2025-10-11 07:12:04.676780124 +0000 UTC m=+1108.451053931" observedRunningTime="2025-10-11 07:12:04.960248375 +0000 UTC m=+1108.734522192" watchObservedRunningTime="2025-10-11 07:12:04.964821654 +0000 UTC m=+1108.739095461" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.946177 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" event={"ID":"4b9b0bd4-6cb3-4970-823f-c942d9567b64","Type":"ContainerStarted","Data":"ae67aaf0a84c17991061d72684db6ecd8276e6675cfcc07638a4a25d9f4e55e2"} Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.946690 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.948279 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" event={"ID":"2e808412-35a4-431c-8ea1-96e0730a48bb","Type":"ContainerStarted","Data":"fc67de15f7d782bbdba2fc4299f2ffd75246f968dfbf9e24e678a91dd7747f8c"} Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.948421 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.949878 5055 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" event={"ID":"c06ae024-6af2-4435-b90b-d8c16d62aaf9","Type":"ContainerStarted","Data":"fa8f21b153599e2d660de65ac183deea79156949a88f076cd40d20acfedf6e39"} Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.967873 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-658bdf4b74-5ngjz" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.980385 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" podStartSLOduration=3.690815619 podStartE2EDuration="19.980368176s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.387167146 +0000 UTC m=+1092.161440943" lastFinishedPulling="2025-10-11 07:12:04.676719693 +0000 UTC m=+1108.450993500" observedRunningTime="2025-10-11 07:12:05.976163987 +0000 UTC m=+1109.750437794" watchObservedRunningTime="2025-10-11 07:12:05.980368176 +0000 UTC m=+1109.754641983" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.989265 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-7b7fb68549-v7jqb" Oct 11 07:12:05 crc kubenswrapper[5055]: I1011 07:12:05.995587 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" podStartSLOduration=4.497766969 podStartE2EDuration="20.995572037s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.251169112 +0000 UTC m=+1092.025442919" lastFinishedPulling="2025-10-11 07:12:04.74897418 +0000 UTC m=+1108.523247987" observedRunningTime="2025-10-11 07:12:05.991868642 +0000 UTC m=+1109.766142459" watchObservedRunningTime="2025-10-11 07:12:05.995572037 +0000 UTC m=+1109.769845844" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.012646 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-84b9b84486-bdvrf" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.016403 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-85d5d9dd78-zmrmw" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.043521 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-858f76bbdd-gxs22" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.046086 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9" podStartSLOduration=3.685903864 podStartE2EDuration="20.046065738s" podCreationTimestamp="2025-10-11 07:11:46 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.363747826 +0000 UTC m=+1092.138021633" lastFinishedPulling="2025-10-11 07:12:04.7239097 +0000 UTC m=+1108.498183507" observedRunningTime="2025-10-11 07:12:06.037822794 +0000 UTC m=+1109.812096601" watchObservedRunningTime="2025-10-11 07:12:06.046065738 +0000 UTC m=+1109.820339535" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.119651 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/horizon-operator-controller-manager-7ffbcb7588-5zhf2" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.450290 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5f67fbc655-9qdps" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.505011 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-9c5c78d49-jsbhj" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.552249 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-f9fb45f8f-qhkjb" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.552859 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-55b6b7c7b8-nmq2v" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.572918 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-79d585cb66-p6cwz" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.631299 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-69fdcfc5f5-spd9v" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.680045 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-79df5fb58c-lw5s2" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.745010 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-68b6c87b68-xwbdv" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.848872 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-db6d7f97b-lqf4k" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.873157 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-67cfc6749b-wm2wn" Oct 11 07:12:06 crc kubenswrapper[5055]: I1011 07:12:06.935611 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-7f554bff7b-lf2lh" Oct 11 07:12:08 crc kubenswrapper[5055]: I1011 07:12:08.973468 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" event={"ID":"5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5","Type":"ContainerStarted","Data":"99835e27ab0c4ba022a2c567141caabeaa297415c96084bbd24e963b64290c08"} Oct 11 07:12:08 crc kubenswrapper[5055]: I1011 07:12:08.974202 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:12:08 crc kubenswrapper[5055]: I1011 07:12:08.999432 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" podStartSLOduration=4.669975468 podStartE2EDuration="23.999411099s" podCreationTimestamp="2025-10-11 07:11:45 +0000 UTC" firstStartedPulling="2025-10-11 07:11:48.460954426 +0000 UTC m=+1092.235228233" lastFinishedPulling="2025-10-11 07:12:07.790390057 +0000 UTC m=+1111.564663864" observedRunningTime="2025-10-11 07:12:08.992963376 
+0000 UTC m=+1112.767237183" watchObservedRunningTime="2025-10-11 07:12:08.999411099 +0000 UTC m=+1112.773684906" Oct 11 07:12:16 crc kubenswrapper[5055]: I1011 07:12:16.884213 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-5df598886f-54hjb" Oct 11 07:12:16 crc kubenswrapper[5055]: I1011 07:12:16.891702 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5458f77c4-brxz6" Oct 11 07:12:17 crc kubenswrapper[5055]: I1011 07:12:17.307592 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn" Oct 11 07:12:17 crc kubenswrapper[5055]: I1011 07:12:17.693430 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-656bcbd775-rk8j7" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.399484 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"] Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.401343 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.403983 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.404015 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.404143 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.404208 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-4nsbs" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.415871 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"] Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.423803 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.423866 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.423910 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.424458 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:12:32 
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.424514 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58" gracePeriod=600
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.452862 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"]
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.454115 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.458864 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.473086 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"]
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.559374 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6nvl\" (UniqueName: \"kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.559442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf655\" (UniqueName: \"kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.559464 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.559706 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.559744 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.660580 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf655\" (UniqueName: \"kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.660632 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.660690 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.660729 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.660799 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6nvl\" (UniqueName: \"kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.662614 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.662625 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.662781 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.682496 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf655\" (UniqueName: \"kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655\") pod \"dnsmasq-dns-758b79db4c-lglwz\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.682791 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6nvl\" (UniqueName: \"kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl\") pod \"dnsmasq-dns-7bfcb9d745-xpmhd\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.722075 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd"
Oct 11 07:12:32 crc kubenswrapper[5055]: I1011 07:12:32.782496 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-lglwz"
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.137121 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58" exitCode=0
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.137191 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58"}
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.137472 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3"}
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.137496 5055 scope.go:117] "RemoveContainer" containerID="544f62b5c981db9fe174f64795733dee914a3486ae2b8f0ec320ed7466166586"
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.156744 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"]
Oct 11 07:12:33 crc kubenswrapper[5055]: W1011 07:12:33.166467 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93c4099c_0bd6_4786_a9f3_15f023e50ff9.slice/crio-00de1645966c054fd2359cefc10ac7cc71c215f7f715ba04f7d5e4e234762538 WatchSource:0}: Error finding container 00de1645966c054fd2359cefc10ac7cc71c215f7f715ba04f7d5e4e234762538: Status 404 returned error can't find the container with id 00de1645966c054fd2359cefc10ac7cc71c215f7f715ba04f7d5e4e234762538
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.168406 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 11 07:12:33 crc kubenswrapper[5055]: I1011 07:12:33.232553 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"]
Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.059965 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"]
Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.071179 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"]
Need to start a new one" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.108655 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"] Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.179703 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.179806 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.179847 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm78j\" (UniqueName: \"kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.187066 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758b79db4c-lglwz" event={"ID":"443266d5-179a-4196-9db8-e41cd5c3046b","Type":"ContainerStarted","Data":"31a69e7c9561e0fae28f2c107d8324a077027628ba683eeb7e57edd3a09311d9"} Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.203061 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" event={"ID":"93c4099c-0bd6-4786-a9f3-15f023e50ff9","Type":"ContainerStarted","Data":"00de1645966c054fd2359cefc10ac7cc71c215f7f715ba04f7d5e4e234762538"} Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.281053 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.281151 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.281189 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm78j\" (UniqueName: \"kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.281962 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " 
pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.282901 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.307106 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm78j\" (UniqueName: \"kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j\") pod \"dnsmasq-dns-8575fc99d7-qsgn7\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.427695 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.701717 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"] Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.731275 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.733596 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.746391 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.789988 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.790076 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.790114 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjbvv\" (UniqueName: \"kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.891941 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.892017 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjbvv\" (UniqueName: \"kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: 
\"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.892076 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.893372 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.893720 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:34 crc kubenswrapper[5055]: I1011 07:12:34.917087 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjbvv\" (UniqueName: \"kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv\") pod \"dnsmasq-dns-77597f887-9hf9x\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.064287 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.168620 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"] Oct 11 07:12:35 crc kubenswrapper[5055]: W1011 07:12:35.184021 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24e89429_9e61_4764_9f49_9063cf8ea9ba.slice/crio-f7c0a326580000beb51b355491a4b907be48140b8103a67fe0f03ccbe30489e3 WatchSource:0}: Error finding container f7c0a326580000beb51b355491a4b907be48140b8103a67fe0f03ccbe30489e3: Status 404 returned error can't find the container with id f7c0a326580000beb51b355491a4b907be48140b8103a67fe0f03ccbe30489e3 Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.216455 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.218342 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.220882 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.221041 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.221090 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-nljcx" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.221117 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.221169 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.221192 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.234780 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.238485 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.245494 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" event={"ID":"24e89429-9e61-4764-9f49-9063cf8ea9ba","Type":"ContainerStarted","Data":"f7c0a326580000beb51b355491a4b907be48140b8103a67fe0f03ccbe30489e3"} Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.303941 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304042 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304069 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304087 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmtr8\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304118 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" 
(UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304148 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304189 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304205 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304225 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304250 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.304275 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406061 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406109 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406134 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406155 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmtr8\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406220 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406260 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406285 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406310 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406335 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406364 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.406713 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") device mount 
path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.407254 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.407289 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.407498 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.408137 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.408780 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.412095 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.412591 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.412668 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.413214 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.424258 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cmtr8\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.428026 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.501898 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:12:35 crc kubenswrapper[5055]: W1011 07:12:35.510332 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca9cc8e6_753f_4235_a1f3_d547472c47c2.slice/crio-3766d3c2bccce39cf57e534e5762064abf3f1dc64cf6677f9a69e356749ad56d WatchSource:0}: Error finding container 3766d3c2bccce39cf57e534e5762064abf3f1dc64cf6677f9a69e356749ad56d: Status 404 returned error can't find the container with id 3766d3c2bccce39cf57e534e5762064abf3f1dc64cf6677f9a69e356749ad56d Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.545038 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.823587 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.829149 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.831454 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.831992 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.832379 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.832556 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.832654 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.832900 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.833105 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f5jcs" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.837786 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920495 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0" Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920550 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920591 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920615 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920635 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920658 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920877 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920918 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl52b\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.920945 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.921057 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:35 crc kubenswrapper[5055]: I1011 07:12:35.921131 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022123 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022170 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022193 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022232 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022257 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022274 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022297 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022320 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022341 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hl52b\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.022356 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.023892 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.024084 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.024621 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.025022 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.025075 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.026660 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.030095 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.030473 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.033001 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.037794 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.038181 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.048688 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl52b\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.055058 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.061327 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-server-0\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " pod="openstack/rabbitmq-server-0"
Oct 11 07:12:36 crc kubenswrapper[5055]: W1011 07:12:36.067911 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6baacc00_a270_4662_ba67_aad18287df2c.slice/crio-0bfc61d188e773eec448e25619ee53677630ef0d3cab1639155d5561dcf3c97f WatchSource:0}: Error finding container 0bfc61d188e773eec448e25619ee53677630ef0d3cab1639155d5561dcf3c97f: Status 404 returned error can't find the container with id 0bfc61d188e773eec448e25619ee53677630ef0d3cab1639155d5561dcf3c97f
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.292984 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-9hf9x" event={"ID":"ca9cc8e6-753f-4235-a1f3-d547472c47c2","Type":"ContainerStarted","Data":"3766d3c2bccce39cf57e534e5762064abf3f1dc64cf6677f9a69e356749ad56d"} Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.298570 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerStarted","Data":"0bfc61d188e773eec448e25619ee53677630ef0d3cab1639155d5561dcf3c97f"} Oct 11 07:12:36 crc kubenswrapper[5055]: I1011 07:12:36.688076 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:12:36 crc kubenswrapper[5055]: W1011 07:12:36.720163 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80597a79_e3fd_41cd_b035_a35494775fcb.slice/crio-dfd5dce990ae800c1bc52587c308c4264b7b7a1cbc5657bb76970072b00bed33 WatchSource:0}: Error finding container dfd5dce990ae800c1bc52587c308c4264b7b7a1cbc5657bb76970072b00bed33: Status 404 returned error can't find the container with id dfd5dce990ae800c1bc52587c308c4264b7b7a1cbc5657bb76970072b00bed33 Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.304407 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.310434 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.314500 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.314724 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-zwvpl" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.316636 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.317297 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.320572 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.320653 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.323371 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.325914 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerStarted","Data":"dfd5dce990ae800c1bc52587c308c4264b7b7a1cbc5657bb76970072b00bed33"} Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346788 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 
11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346844 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346872 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346962 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.346994 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.347020 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkx6v\" (UniqueName: \"kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.347064 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.347083 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.456721 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457187 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457256 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457298 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457332 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457354 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkx6v\" (UniqueName: \"kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457437 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457467 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457496 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457570 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.457906 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") 
device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.460559 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.463561 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.464157 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.465959 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.466086 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.468415 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.485408 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkx6v\" (UniqueName: \"kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.536182 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-galera-0\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " pod="openstack/openstack-galera-0" Oct 11 07:12:37 crc kubenswrapper[5055]: I1011 07:12:37.634103 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.091447 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.533464 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.535382 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.539323 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-wdgf9" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.541248 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.541828 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.542445 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.559516 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.593522 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.593576 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.593653 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.593900 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.593985 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s5nf\" (UniqueName: \"kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.594150 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.594290 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.594486 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.594562 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703484 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703521 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703571 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703590 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703624 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703647 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703665 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s5nf\" (UniqueName: \"kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703740 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.703796 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.704846 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.705470 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.705878 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.718581 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.721614 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.727400 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.729216 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.742729 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.743132 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.755997 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s5nf\" (UniqueName: \"kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf\") pod \"openstack-cell1-galera-0\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.861125 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.922279 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.923318 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.930030 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.930057 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.930669 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-79wvb" Oct 11 07:12:38 crc kubenswrapper[5055]: I1011 07:12:38.940255 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.009610 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.014301 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.014347 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.014379 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.014428 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl6sj\" (UniqueName: \"kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.117228 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.117301 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.117319 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.117479 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl6sj\" (UniqueName: \"kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.117691 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.121054 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.121416 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.131753 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.139214 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl6sj\" (UniqueName: \"kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.159348 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs\") pod \"memcached-0\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " pod="openstack/memcached-0" Oct 11 07:12:39 crc kubenswrapper[5055]: I1011 07:12:39.247547 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.440418 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.441605 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.446493 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-s89wf" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.483183 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.541016 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pcd4\" (UniqueName: \"kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4\") pod \"kube-state-metrics-0\" (UID: \"cae844be-3146-4234-b3f8-6c3aba5defe8\") " pod="openstack/kube-state-metrics-0" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.642202 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pcd4\" (UniqueName: \"kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4\") pod \"kube-state-metrics-0\" (UID: \"cae844be-3146-4234-b3f8-6c3aba5defe8\") " pod="openstack/kube-state-metrics-0" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.663232 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pcd4\" (UniqueName: \"kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4\") pod \"kube-state-metrics-0\" (UID: \"cae844be-3146-4234-b3f8-6c3aba5defe8\") " pod="openstack/kube-state-metrics-0" Oct 11 07:12:40 crc kubenswrapper[5055]: I1011 07:12:40.798262 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:12:42 crc kubenswrapper[5055]: W1011 07:12:42.159959 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6c56af9_552f_4b5a_9648_f28af1328534.slice/crio-fabd2926d4dcd664ef9ed674710907fa694b3f741c421638b2feeca778cdbc3d WatchSource:0}: Error finding container fabd2926d4dcd664ef9ed674710907fa694b3f741c421638b2feeca778cdbc3d: Status 404 returned error can't find the container with id fabd2926d4dcd664ef9ed674710907fa694b3f741c421638b2feeca778cdbc3d Oct 11 07:12:42 crc kubenswrapper[5055]: I1011 07:12:42.389548 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerStarted","Data":"fabd2926d4dcd664ef9ed674710907fa694b3f741c421638b2feeca778cdbc3d"} Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.010126 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.012348 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.016697 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.018397 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-qt55m" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.031454 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.032686 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.053075 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.071101 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.085913 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.117754 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.117874 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.117902 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.117920 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.117970 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.118051 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc7bh\" (UniqueName: \"kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.118093 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.118129 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.118213 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.118269 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.136016 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.136097 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgvsl\" (UniqueName: \"kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.136131 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237180 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237240 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237265 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgvsl\" (UniqueName: \"kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237294 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle\") pod \"ovn-controller-2l5hh\" 
(UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237343 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237378 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237400 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237421 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237453 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237488 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc7bh\" (UniqueName: \"kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237519 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237550 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.237580 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.238155 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.239524 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.239692 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.240161 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.240280 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.240426 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.240472 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.240490 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.242874 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.249488 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.249506 5055 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.254721 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc7bh\" (UniqueName: \"kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh\") pod \"ovn-controller-ovs-5t8kh\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.256938 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgvsl\" (UniqueName: \"kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl\") pod \"ovn-controller-2l5hh\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") " pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.332378 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.355053 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2l5hh" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.421416 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.423255 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.426461 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.426991 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-f22ql" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.427142 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.428250 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.428507 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.444561 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542125 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542246 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbzp9\" (UniqueName: \"kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " 
pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542273 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542340 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542381 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542440 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542461 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.542511 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646244 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbzp9\" (UniqueName: \"kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646346 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646378 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646396 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646461 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646477 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646502 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.646560 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.647135 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.648086 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.650605 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.650695 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.654806 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.661617 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.666946 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbzp9\" (UniqueName: \"kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.668307 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.680075 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") " pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:45 crc kubenswrapper[5055]: I1011 07:12:45.759390 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.364387 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.366328 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.369806 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.369924 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.370142 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-rpbzm" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.371208 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.376279 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505303 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505384 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505437 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505480 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505597 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505647 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505676 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " 
pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.505723 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wllkk\" (UniqueName: \"kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607016 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607093 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607118 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607142 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607165 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wllkk\" (UniqueName: \"kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607258 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607279 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.607305 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.608018 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.609161 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.609453 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.613992 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.619150 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.625638 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.627674 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.627911 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.630667 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wllkk\" (UniqueName: \"kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk\") pod \"ovsdbserver-sb-0\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:48 crc kubenswrapper[5055]: I1011 07:12:48.694209 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 07:12:56 crc kubenswrapper[5055]: E1011 07:12:56.518333 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:adcdeb8ecd601fb03c3b0901d5b5111af2ca48f7dd443e22224db6daaf08f5d0" Oct 11 07:12:56 crc kubenswrapper[5055]: E1011 07:12:56.520240 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:adcdeb8ecd601fb03c3b0901d5b5111af2ca48f7dd443e22224db6daaf08f5d0,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hl52b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(80597a79-e3fd-41cd-b035-a35494775fcb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 07:12:56 crc kubenswrapper[5055]: E1011 07:12:56.521568 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with 
ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" Oct 11 07:12:57 crc kubenswrapper[5055]: E1011 07:12:57.263069 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 11 07:12:57 crc kubenswrapper[5055]: E1011 07:12:57.263498 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nf655,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-758b79db4c-lglwz_openstack(443266d5-179a-4196-9db8-e41cd5c3046b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 07:12:57 crc kubenswrapper[5055]: E1011 07:12:57.264876 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-758b79db4c-lglwz" podUID="443266d5-179a-4196-9db8-e41cd5c3046b" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.562830 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.564478 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d6nvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bfcb9d745-xpmhd_openstack(93c4099c-0bd6-4786-a9f3-15f023e50ff9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.566069 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" podUID="93c4099c-0bd6-4786-a9f3-15f023e50ff9" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.923844 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.924500 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts 
--keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zm78j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-8575fc99d7-qsgn7_openstack(24e89429-9e61-4764-9f49-9063cf8ea9ba): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.926990 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" podUID="24e89429-9e61-4764-9f49-9063cf8ea9ba" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.955047 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.955266 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sjbvv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-77597f887-9hf9x_openstack(ca9cc8e6-753f-4235-a1f3-d547472c47c2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 11 07:13:02 crc kubenswrapper[5055]: E1011 07:13:02.956460 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-77597f887-9hf9x" podUID="ca9cc8e6-753f-4235-a1f3-d547472c47c2" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.416451 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-lglwz" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.532877 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc\") pod \"443266d5-179a-4196-9db8-e41cd5c3046b\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.533273 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config\") pod \"443266d5-179a-4196-9db8-e41cd5c3046b\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.533317 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nf655\" (UniqueName: \"kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655\") pod \"443266d5-179a-4196-9db8-e41cd5c3046b\" (UID: \"443266d5-179a-4196-9db8-e41cd5c3046b\") " Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.533534 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "443266d5-179a-4196-9db8-e41cd5c3046b" (UID: "443266d5-179a-4196-9db8-e41cd5c3046b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.533843 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config" (OuterVolumeSpecName: "config") pod "443266d5-179a-4196-9db8-e41cd5c3046b" (UID: "443266d5-179a-4196-9db8-e41cd5c3046b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.533965 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.539058 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655" (OuterVolumeSpecName: "kube-api-access-nf655") pod "443266d5-179a-4196-9db8-e41cd5c3046b" (UID: "443266d5-179a-4196-9db8-e41cd5c3046b"). InnerVolumeSpecName "kube-api-access-nf655". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.577663 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758b79db4c-lglwz" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.581518 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758b79db4c-lglwz" event={"ID":"443266d5-179a-4196-9db8-e41cd5c3046b","Type":"ContainerDied","Data":"31a69e7c9561e0fae28f2c107d8324a077027628ba683eeb7e57edd3a09311d9"} Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.636805 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443266d5-179a-4196-9db8-e41cd5c3046b-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.636839 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nf655\" (UniqueName: \"kubernetes.io/projected/443266d5-179a-4196-9db8-e41cd5c3046b-kube-api-access-nf655\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.712555 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.720275 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-758b79db4c-lglwz"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.737032 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.751481 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.964516 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.981970 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:13:03 crc kubenswrapper[5055]: I1011 07:13:03.988061 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 07:13:04 crc kubenswrapper[5055]: E1011 07:13:04.037029 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df\\\"\"" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" podUID="24e89429-9e61-4764-9f49-9063cf8ea9ba" Oct 11 07:13:04 crc kubenswrapper[5055]: E1011 07:13:04.037278 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:c4e71b2158fd939dad8b8e705273493051d3023273d23b279f2699dce6db33df\\\"\"" pod="openstack/dnsmasq-dns-77597f887-9hf9x" podUID="ca9cc8e6-753f-4235-a1f3-d547472c47c2" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.055646 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.286366 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1bebbd2e_a315_493d_820b_69e8dc749ee1.slice/crio-ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8 WatchSource:0}: Error finding container ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8: Status 404 returned error can't find the container with id 
ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8 Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.386709 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcae844be_3146_4234_b3f8_6c3aba5defe8.slice/crio-fe3c6023a561eb422be90545fc77014f830803003c6fcd569d5ca86b65d484cc WatchSource:0}: Error finding container fe3c6023a561eb422be90545fc77014f830803003c6fcd569d5ca86b65d484cc: Status 404 returned error can't find the container with id fe3c6023a561eb422be90545fc77014f830803003c6fcd569d5ca86b65d484cc Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.388325 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01de7a83_4a08_494a_a3b9_774e6787e30f.slice/crio-9e0288c284168c302b026f8fa535bc1a6b3b6d591b4180866429e5afe6ef8460 WatchSource:0}: Error finding container 9e0288c284168c302b026f8fa535bc1a6b3b6d591b4180866429e5afe6ef8460: Status 404 returned error can't find the container with id 9e0288c284168c302b026f8fa535bc1a6b3b6d591b4180866429e5afe6ef8460 Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.390481 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod728d8a1e_9c94_49ce_94f4_491bf34a9b16.slice/crio-54dafb3189f4c266b9a8f1df83b35d9ddb0496452f4a5ca42692c7f83479abee WatchSource:0}: Error finding container 54dafb3189f4c266b9a8f1df83b35d9ddb0496452f4a5ca42692c7f83479abee: Status 404 returned error can't find the container with id 54dafb3189f4c266b9a8f1df83b35d9ddb0496452f4a5ca42692c7f83479abee Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.394460 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a2fd839_7187_4fce_be78_2a911103f8cc.slice/crio-06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54 WatchSource:0}: Error finding container 06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54: Status 404 returned error can't find the container with id 06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54 Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.456505 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.590381 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.590477 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfcb9d745-xpmhd" event={"ID":"93c4099c-0bd6-4786-a9f3-15f023e50ff9","Type":"ContainerDied","Data":"00de1645966c054fd2359cefc10ac7cc71c215f7f715ba04f7d5e4e234762538"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.595392 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerStarted","Data":"06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.596638 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cae844be-3146-4234-b3f8-6c3aba5defe8","Type":"ContainerStarted","Data":"fe3c6023a561eb422be90545fc77014f830803003c6fcd569d5ca86b65d484cc"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.597967 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh" event={"ID":"51322291-81d0-4cbc-a761-0294a8365fd3","Type":"ContainerStarted","Data":"24aa56eae5db123d03cb69ec134c4277630f172a76f2d2449612cabd90bd71a6"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.601115 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerStarted","Data":"9e0288c284168c302b026f8fa535bc1a6b3b6d591b4180866429e5afe6ef8460"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.602692 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerStarted","Data":"54dafb3189f4c266b9a8f1df83b35d9ddb0496452f4a5ca42692c7f83479abee"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.604015 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerStarted","Data":"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.604971 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1bebbd2e-a315-493d-820b-69e8dc749ee1","Type":"ContainerStarted","Data":"ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8"} Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.606795 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config\") pod \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.606900 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6nvl\" (UniqueName: \"kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl\") pod \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\" (UID: \"93c4099c-0bd6-4786-a9f3-15f023e50ff9\") " Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.607499 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config" (OuterVolumeSpecName: "config") pod "93c4099c-0bd6-4786-a9f3-15f023e50ff9" (UID: "93c4099c-0bd6-4786-a9f3-15f023e50ff9"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.611338 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl" (OuterVolumeSpecName: "kube-api-access-d6nvl") pod "93c4099c-0bd6-4786-a9f3-15f023e50ff9" (UID: "93c4099c-0bd6-4786-a9f3-15f023e50ff9"). InnerVolumeSpecName "kube-api-access-d6nvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.708298 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93c4099c-0bd6-4786-a9f3-15f023e50ff9-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.708339 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6nvl\" (UniqueName: \"kubernetes.io/projected/93c4099c-0bd6-4786-a9f3-15f023e50ff9-kube-api-access-d6nvl\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.786732 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:13:04 crc kubenswrapper[5055]: W1011 07:13:04.792149 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5b9917d_52e6_4d08_a131_4d1b8b809161.slice/crio-044d7c6b9fde12607a6a10179b58395680d371f4aaae03808c6593488274fb6d WatchSource:0}: Error finding container 044d7c6b9fde12607a6a10179b58395680d371f4aaae03808c6593488274fb6d: Status 404 returned error can't find the container with id 044d7c6b9fde12607a6a10179b58395680d371f4aaae03808c6593488274fb6d Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.947317 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"] Oct 11 07:13:04 crc kubenswrapper[5055]: I1011 07:13:04.962867 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bfcb9d745-xpmhd"] Oct 11 07:13:05 crc kubenswrapper[5055]: I1011 07:13:05.006124 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="443266d5-179a-4196-9db8-e41cd5c3046b" path="/var/lib/kubelet/pods/443266d5-179a-4196-9db8-e41cd5c3046b/volumes" Oct 11 07:13:05 crc kubenswrapper[5055]: I1011 07:13:05.006677 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93c4099c-0bd6-4786-a9f3-15f023e50ff9" path="/var/lib/kubelet/pods/93c4099c-0bd6-4786-a9f3-15f023e50ff9/volumes" Oct 11 07:13:05 crc kubenswrapper[5055]: I1011 07:13:05.613056 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerStarted","Data":"20eedda3bec3e7043f4bf711e2094fda9e39ca41bced8fbfd357cf964974b401"} Oct 11 07:13:05 crc kubenswrapper[5055]: I1011 07:13:05.615110 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerStarted","Data":"a36238e8a93e4627ba8d72309cd8bd57e2a2425d6a1b88bf78c70b8fff37dde5"} Oct 11 07:13:05 crc kubenswrapper[5055]: I1011 07:13:05.622325 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerStarted","Data":"f0982b0e0f3eeaf7aa5484768477b7a1133fccf88ef1ea8df59f16ddad4fc302"} Oct 11 07:13:05 
crc kubenswrapper[5055]: I1011 07:13:05.624071 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerStarted","Data":"044d7c6b9fde12607a6a10179b58395680d371f4aaae03808c6593488274fb6d"} Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.535432 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"] Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.543056 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.546434 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.548272 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"] Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.648504 5055 generic.go:334] "Generic (PLEG): container finished" podID="b6c56af9-552f-4b5a-9648-f28af1328534" containerID="a36238e8a93e4627ba8d72309cd8bd57e2a2425d6a1b88bf78c70b8fff37dde5" exitCode=0 Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.648580 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerDied","Data":"a36238e8a93e4627ba8d72309cd8bd57e2a2425d6a1b88bf78c70b8fff37dde5"} Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.661839 5055 generic.go:334] "Generic (PLEG): container finished" podID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerID="20eedda3bec3e7043f4bf711e2094fda9e39ca41bced8fbfd357cf964974b401" exitCode=0 Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.661884 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerDied","Data":"20eedda3bec3e7043f4bf711e2094fda9e39ca41bced8fbfd357cf964974b401"} Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.673663 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.673733 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.673879 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.673968 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccp9v\" (UniqueName: 
\"kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.674024 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.674058 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.694312 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.724034 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.743200 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.760362 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.789726 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.789840 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.789928 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.789979 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccp9v\" (UniqueName: \"kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.790021 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: 
\"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.790046 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.791887 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.792696 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.793223 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.866925 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.874411 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.893287 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.894735 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.894831 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.894894 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.895018 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcp4l\" (UniqueName: \"kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.886742 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccp9v\" (UniqueName: \"kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v\") pod \"ovn-controller-metrics-p2xsl\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:08 crc kubenswrapper[5055]: I1011 07:13:08.960487 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.019478 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcp4l\" (UniqueName: \"kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.019624 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.019673 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.019746 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.039444 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.041094 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.049061 5055 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.050513 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.050624 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.058484 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.059326 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.109685 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcp4l\" (UniqueName: \"kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l\") pod \"dnsmasq-dns-f6b595d95-lbk7j\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.170069 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.226812 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.226903 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.226934 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.226959 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5rbp\" (UniqueName: \"kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.227013 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc 
kubenswrapper[5055]: I1011 07:13:09.328058 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.328142 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.328177 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.328198 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.328223 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5rbp\" (UniqueName: \"kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.329512 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.329587 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.330105 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.330186 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.361542 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5rbp\" (UniqueName: 
\"kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp\") pod \"dnsmasq-dns-dc9d58d7-f6scn\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.374307 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.378681 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.484145 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.495141 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636327 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zm78j\" (UniqueName: \"kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j\") pod \"24e89429-9e61-4764-9f49-9063cf8ea9ba\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636387 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config\") pod \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636433 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc\") pod \"24e89429-9e61-4764-9f49-9063cf8ea9ba\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636459 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc\") pod \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636581 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config\") pod \"24e89429-9e61-4764-9f49-9063cf8ea9ba\" (UID: \"24e89429-9e61-4764-9f49-9063cf8ea9ba\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636601 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjbvv\" (UniqueName: \"kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv\") pod \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\" (UID: \"ca9cc8e6-753f-4235-a1f3-d547472c47c2\") " Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.636879 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config" (OuterVolumeSpecName: "config") pod "ca9cc8e6-753f-4235-a1f3-d547472c47c2" (UID: "ca9cc8e6-753f-4235-a1f3-d547472c47c2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.637324 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config" (OuterVolumeSpecName: "config") pod "24e89429-9e61-4764-9f49-9063cf8ea9ba" (UID: "24e89429-9e61-4764-9f49-9063cf8ea9ba"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.637396 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "24e89429-9e61-4764-9f49-9063cf8ea9ba" (UID: "24e89429-9e61-4764-9f49-9063cf8ea9ba"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.637491 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ca9cc8e6-753f-4235-a1f3-d547472c47c2" (UID: "ca9cc8e6-753f-4235-a1f3-d547472c47c2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.638623 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.638647 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.638659 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ca9cc8e6-753f-4235-a1f3-d547472c47c2-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.638670 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24e89429-9e61-4764-9f49-9063cf8ea9ba-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.640344 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j" (OuterVolumeSpecName: "kube-api-access-zm78j") pod "24e89429-9e61-4764-9f49-9063cf8ea9ba" (UID: "24e89429-9e61-4764-9f49-9063cf8ea9ba"). InnerVolumeSpecName "kube-api-access-zm78j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.640820 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv" (OuterVolumeSpecName: "kube-api-access-sjbvv") pod "ca9cc8e6-753f-4235-a1f3-d547472c47c2" (UID: "ca9cc8e6-753f-4235-a1f3-d547472c47c2"). InnerVolumeSpecName "kube-api-access-sjbvv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.670432 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" event={"ID":"24e89429-9e61-4764-9f49-9063cf8ea9ba","Type":"ContainerDied","Data":"f7c0a326580000beb51b355491a4b907be48140b8103a67fe0f03ccbe30489e3"} Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.670729 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8575fc99d7-qsgn7" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.672245 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77597f887-9hf9x" event={"ID":"ca9cc8e6-753f-4235-a1f3-d547472c47c2","Type":"ContainerDied","Data":"3766d3c2bccce39cf57e534e5762064abf3f1dc64cf6677f9a69e356749ad56d"} Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.672301 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77597f887-9hf9x" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.729925 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.733622 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8575fc99d7-qsgn7"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.740824 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjbvv\" (UniqueName: \"kubernetes.io/projected/ca9cc8e6-753f-4235-a1f3-d547472c47c2-kube-api-access-sjbvv\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.740866 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zm78j\" (UniqueName: \"kubernetes.io/projected/24e89429-9e61-4764-9f49-9063cf8ea9ba-kube-api-access-zm78j\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.773557 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:13:09 crc kubenswrapper[5055]: I1011 07:13:09.780442 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77597f887-9hf9x"] Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.165040 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.325229 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"] Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.333898 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:10 crc kubenswrapper[5055]: W1011 07:13:10.408756 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4cf19540_dac5_44be_a6be_7ab53926529c.slice/crio-794d25ca317dec0d9bec305d3fe5d332c3baacd685141ee1696ebd53b7d43bd8 WatchSource:0}: Error finding container 794d25ca317dec0d9bec305d3fe5d332c3baacd685141ee1696ebd53b7d43bd8: Status 404 returned error can't find the container with id 794d25ca317dec0d9bec305d3fe5d332c3baacd685141ee1696ebd53b7d43bd8 Oct 11 07:13:10 crc kubenswrapper[5055]: W1011 07:13:10.412571 5055 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ef1af2d_934c_42a6_9206_f754332e3bec.slice/crio-4fa8ff6e5d3febd0848008abf884a769f42dbbb5ae963e0655a21ef1d7dd0085 WatchSource:0}: Error finding container 4fa8ff6e5d3febd0848008abf884a769f42dbbb5ae963e0655a21ef1d7dd0085: Status 404 returned error can't find the container with id 4fa8ff6e5d3febd0848008abf884a769f42dbbb5ae963e0655a21ef1d7dd0085 Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.698563 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" event={"ID":"2ef1af2d-934c-42a6-9206-f754332e3bec","Type":"ContainerStarted","Data":"4fa8ff6e5d3febd0848008abf884a769f42dbbb5ae963e0655a21ef1d7dd0085"} Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.707617 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerStarted","Data":"5433269dd3db71b29ff92097b8c5e94a07217bab29089e9289f976033732e76a"} Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.710393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p2xsl" event={"ID":"beb2c0e8-4291-454c-aae1-28c186965761","Type":"ContainerStarted","Data":"cb83d0fb3a7d29f9fb8ef3b4abb6f76d95731049ea56fa1c540c2bbfbcb54d98"} Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.714562 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" event={"ID":"4cf19540-dac5-44be-a6be-7ab53926529c","Type":"ContainerStarted","Data":"794d25ca317dec0d9bec305d3fe5d332c3baacd685141ee1696ebd53b7d43bd8"} Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.717027 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerStarted","Data":"5df485ac2f70f05a177e6e141ec2dc723c3f8a0fa7ae8342e45af4f8db485f9b"} Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.733190 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=33.733175049 podStartE2EDuration="33.733175049s" podCreationTimestamp="2025-10-11 07:12:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:13:10.726566953 +0000 UTC m=+1174.500840760" watchObservedRunningTime="2025-10-11 07:13:10.733175049 +0000 UTC m=+1174.507448856" Oct 11 07:13:10 crc kubenswrapper[5055]: I1011 07:13:10.753583 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=12.425931311 podStartE2EDuration="34.753564786s" podCreationTimestamp="2025-10-11 07:12:36 +0000 UTC" firstStartedPulling="2025-10-11 07:12:42.161786752 +0000 UTC m=+1145.936060559" lastFinishedPulling="2025-10-11 07:13:04.489420227 +0000 UTC m=+1168.263694034" observedRunningTime="2025-10-11 07:13:10.749243324 +0000 UTC m=+1174.523517131" watchObservedRunningTime="2025-10-11 07:13:10.753564786 +0000 UTC m=+1174.527838593" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.005882 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24e89429-9e61-4764-9f49-9063cf8ea9ba" path="/var/lib/kubelet/pods/24e89429-9e61-4764-9f49-9063cf8ea9ba/volumes" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.006487 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ca9cc8e6-753f-4235-a1f3-d547472c47c2" path="/var/lib/kubelet/pods/ca9cc8e6-753f-4235-a1f3-d547472c47c2/volumes" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.731263 5055 generic.go:334] "Generic (PLEG): container finished" podID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerID="854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4" exitCode=0 Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.733180 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerDied","Data":"854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.739393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerStarted","Data":"0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.744375 5055 generic.go:334] "Generic (PLEG): container finished" podID="4cf19540-dac5-44be-a6be-7ab53926529c" containerID="3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8" exitCode=0 Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.744636 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" event={"ID":"4cf19540-dac5-44be-a6be-7ab53926529c","Type":"ContainerDied","Data":"3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.747205 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1bebbd2e-a315-493d-820b-69e8dc749ee1","Type":"ContainerStarted","Data":"10d71784edd3f6ec2fbce87c08a6c3ea956ccb7e0b266974fc673f0bc6f0afd2"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.747846 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.753986 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerStarted","Data":"592f9a3bef2915f38f89bd3def34fbbcb1bece166f2fe29ce499fa62c6995ab3"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.758422 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cae844be-3146-4234-b3f8-6c3aba5defe8","Type":"ContainerStarted","Data":"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.758575 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.760223 5055 generic.go:334] "Generic (PLEG): container finished" podID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerID="fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484" exitCode=0 Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.760255 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" event={"ID":"2ef1af2d-934c-42a6-9206-f754332e3bec","Type":"ContainerDied","Data":"fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.762459 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh" 
event={"ID":"51322291-81d0-4cbc-a761-0294a8365fd3","Type":"ContainerStarted","Data":"027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3"} Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.762652 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-2l5hh" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.802130 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=28.417950114 podStartE2EDuration="33.802111038s" podCreationTimestamp="2025-10-11 07:12:38 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.289244587 +0000 UTC m=+1168.063518394" lastFinishedPulling="2025-10-11 07:13:09.673405511 +0000 UTC m=+1173.447679318" observedRunningTime="2025-10-11 07:13:11.800197814 +0000 UTC m=+1175.574471631" watchObservedRunningTime="2025-10-11 07:13:11.802111038 +0000 UTC m=+1175.576384855" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.834646 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=25.750359466 podStartE2EDuration="31.834621957s" podCreationTimestamp="2025-10-11 07:12:40 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.456456544 +0000 UTC m=+1168.230730351" lastFinishedPulling="2025-10-11 07:13:10.540719035 +0000 UTC m=+1174.314992842" observedRunningTime="2025-10-11 07:13:11.815345552 +0000 UTC m=+1175.589619369" watchObservedRunningTime="2025-10-11 07:13:11.834621957 +0000 UTC m=+1175.608895764" Oct 11 07:13:11 crc kubenswrapper[5055]: I1011 07:13:11.856079 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-2l5hh" podStartSLOduration=22.36917901 podStartE2EDuration="27.856061924s" podCreationTimestamp="2025-10-11 07:12:44 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.186582208 +0000 UTC m=+1167.960856015" lastFinishedPulling="2025-10-11 07:13:09.673465122 +0000 UTC m=+1173.447738929" observedRunningTime="2025-10-11 07:13:11.852129903 +0000 UTC m=+1175.626403710" watchObservedRunningTime="2025-10-11 07:13:11.856061924 +0000 UTC m=+1175.630335731" Oct 11 07:13:12 crc kubenswrapper[5055]: E1011 07:13:12.291233 5055 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.162:36218->38.102.83.162:45063: write tcp 38.102.83.162:36218->38.102.83.162:45063: write: broken pipe Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.772419 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" event={"ID":"4cf19540-dac5-44be-a6be-7ab53926529c","Type":"ContainerStarted","Data":"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f"} Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.772624 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.774601 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" event={"ID":"2ef1af2d-934c-42a6-9206-f754332e3bec","Type":"ContainerStarted","Data":"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa"} Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.774688 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.777051 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerStarted","Data":"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a"} Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.795977 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" podStartSLOduration=4.341823206 podStartE2EDuration="4.795953782s" podCreationTimestamp="2025-10-11 07:13:08 +0000 UTC" firstStartedPulling="2025-10-11 07:13:10.410298086 +0000 UTC m=+1174.184571893" lastFinishedPulling="2025-10-11 07:13:10.864428662 +0000 UTC m=+1174.638702469" observedRunningTime="2025-10-11 07:13:12.788212593 +0000 UTC m=+1176.562486410" watchObservedRunningTime="2025-10-11 07:13:12.795953782 +0000 UTC m=+1176.570227589" Oct 11 07:13:12 crc kubenswrapper[5055]: E1011 07:13:12.796083 5055 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.162:36246->38.102.83.162:45063: write tcp 38.102.83.162:36246->38.102.83.162:45063: write: broken pipe Oct 11 07:13:12 crc kubenswrapper[5055]: I1011 07:13:12.808460 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" podStartSLOduration=4.36744489 podStartE2EDuration="4.808442715s" podCreationTimestamp="2025-10-11 07:13:08 +0000 UTC" firstStartedPulling="2025-10-11 07:13:10.421070191 +0000 UTC m=+1174.195343998" lastFinishedPulling="2025-10-11 07:13:10.862068016 +0000 UTC m=+1174.636341823" observedRunningTime="2025-10-11 07:13:12.807038805 +0000 UTC m=+1176.581312612" watchObservedRunningTime="2025-10-11 07:13:12.808442715 +0000 UTC m=+1176.582716532" Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.784845 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p2xsl" event={"ID":"beb2c0e8-4291-454c-aae1-28c186965761","Type":"ContainerStarted","Data":"f24f414c261ba713dd02a74d75b54a85809df878427bfe504f61940fd8a374e3"} Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.786699 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerStarted","Data":"4150f4280b66ed07a18507fa2549cb34d90ba3d5f369125336c5e32bea64bc9a"} Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.788178 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerStarted","Data":"0f67a32ae0f476c9f1b1e19bc12c766c0df90a9fc5070dfdfc272f9a4725310f"} Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.790110 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerStarted","Data":"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18"} Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.790664 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.802098 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-p2xsl" podStartSLOduration=2.796608254 podStartE2EDuration="5.802081432s" podCreationTimestamp="2025-10-11 07:13:08 +0000 UTC" firstStartedPulling="2025-10-11 07:13:10.409085502 +0000 UTC m=+1174.183359309" lastFinishedPulling="2025-10-11 07:13:13.41455868 +0000 UTC 
m=+1177.188832487" observedRunningTime="2025-10-11 07:13:13.800421865 +0000 UTC m=+1177.574695662" watchObservedRunningTime="2025-10-11 07:13:13.802081432 +0000 UTC m=+1177.576355239" Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.821113 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=17.88794561 podStartE2EDuration="26.82109353s" podCreationTimestamp="2025-10-11 07:12:47 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.456099984 +0000 UTC m=+1168.230373801" lastFinishedPulling="2025-10-11 07:13:13.389247914 +0000 UTC m=+1177.163521721" observedRunningTime="2025-10-11 07:13:13.817664173 +0000 UTC m=+1177.591937980" watchObservedRunningTime="2025-10-11 07:13:13.82109353 +0000 UTC m=+1177.595367347" Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.847671 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=20.905928389 podStartE2EDuration="29.847646731s" podCreationTimestamp="2025-10-11 07:12:44 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.456483545 +0000 UTC m=+1168.230757352" lastFinishedPulling="2025-10-11 07:13:13.398201887 +0000 UTC m=+1177.172475694" observedRunningTime="2025-10-11 07:13:13.839228673 +0000 UTC m=+1177.613502480" watchObservedRunningTime="2025-10-11 07:13:13.847646731 +0000 UTC m=+1177.621920538" Oct 11 07:13:13 crc kubenswrapper[5055]: I1011 07:13:13.877599 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-5t8kh" podStartSLOduration=25.055588014 podStartE2EDuration="29.877576138s" podCreationTimestamp="2025-10-11 07:12:44 +0000 UTC" firstStartedPulling="2025-10-11 07:13:04.79531569 +0000 UTC m=+1168.569589497" lastFinishedPulling="2025-10-11 07:13:09.617303814 +0000 UTC m=+1173.391577621" observedRunningTime="2025-10-11 07:13:13.872293998 +0000 UTC m=+1177.646567815" watchObservedRunningTime="2025-10-11 07:13:13.877576138 +0000 UTC m=+1177.651849945" Oct 11 07:13:14 crc kubenswrapper[5055]: I1011 07:13:14.801364 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.694343 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.746939 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.760431 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.760738 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.807002 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.812482 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Oct 11 07:13:15 crc kubenswrapper[5055]: I1011 07:13:15.854120 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.845579 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ovsdbserver-nb-0" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.972936 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.974612 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.976594 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.976929 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-tj2dz" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.976962 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Oct 11 07:13:16 crc kubenswrapper[5055]: I1011 07:13:16.985973 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.007417 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.007601 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d5kj\" (UniqueName: \"kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.007933 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.008020 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.008076 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.008113 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.008199 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.017703 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110069 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110174 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110229 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d5kj\" (UniqueName: \"kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110302 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110331 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110355 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110376 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.110928 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.111663 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: 
I1011 07:13:17.111689 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.115979 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.116495 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.116653 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.128253 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d5kj\" (UniqueName: \"kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj\") pod \"ovn-northd-0\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.292810 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.634858 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.635365 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.681949 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.732482 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:13:17 crc kubenswrapper[5055]: W1011 07:13:17.737810 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86907505_2c19_41ed_b72d_0b8bbeab1eec.slice/crio-7671a3c8e53a3012cc95874e8cd5aecd0cd38d3f4d1ffe9c66160a30d833a91f WatchSource:0}: Error finding container 7671a3c8e53a3012cc95874e8cd5aecd0cd38d3f4d1ffe9c66160a30d833a91f: Status 404 returned error can't find the container with id 7671a3c8e53a3012cc95874e8cd5aecd0cd38d3f4d1ffe9c66160a30d833a91f Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.821576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerStarted","Data":"7671a3c8e53a3012cc95874e8cd5aecd0cd38d3f4d1ffe9c66160a30d833a91f"} Oct 11 07:13:17 crc kubenswrapper[5055]: I1011 07:13:17.865284 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 11 07:13:18 crc kubenswrapper[5055]: I1011 07:13:18.861326 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 11 07:13:18 crc kubenswrapper[5055]: I1011 07:13:18.861708 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 11 07:13:18 crc kubenswrapper[5055]: I1011 07:13:18.914703 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.010389 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-t794s"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.011513 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-t794s" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.017216 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-t794s"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.119586 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-ktc4n"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.120890 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.127250 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ktc4n"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.140415 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9px8h\" (UniqueName: \"kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h\") pod \"keystone-db-create-t794s\" (UID: \"a6501c99-b98e-4311-a419-5ed815080287\") " pod="openstack/keystone-db-create-t794s" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.242546 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52zlm\" (UniqueName: \"kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm\") pod \"placement-db-create-ktc4n\" (UID: \"0d54edae-40fd-42a3-b80f-d0633ea306e1\") " pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.242610 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9px8h\" (UniqueName: \"kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h\") pod \"keystone-db-create-t794s\" (UID: \"a6501c99-b98e-4311-a419-5ed815080287\") " pod="openstack/keystone-db-create-t794s" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.249893 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.260592 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9px8h\" (UniqueName: \"kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h\") pod \"keystone-db-create-t794s\" (UID: \"a6501c99-b98e-4311-a419-5ed815080287\") " pod="openstack/keystone-db-create-t794s" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.337282 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-t794s" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.343839 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52zlm\" (UniqueName: \"kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm\") pod \"placement-db-create-ktc4n\" (UID: \"0d54edae-40fd-42a3-b80f-d0633ea306e1\") " pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.363602 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52zlm\" (UniqueName: \"kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm\") pod \"placement-db-create-ktc4n\" (UID: \"0d54edae-40fd-42a3-b80f-d0633ea306e1\") " pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.375921 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.379963 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.410637 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-nr42l"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.411661 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-nr42l" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.419582 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-nr42l"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.460189 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.499613 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.546646 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rhhd\" (UniqueName: \"kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd\") pod \"glance-db-create-nr42l\" (UID: \"41210913-fc01-40fc-ae9a-c11bd4e58345\") " pod="openstack/glance-db-create-nr42l" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.648265 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rhhd\" (UniqueName: \"kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd\") pod \"glance-db-create-nr42l\" (UID: \"41210913-fc01-40fc-ae9a-c11bd4e58345\") " pod="openstack/glance-db-create-nr42l" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.666750 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rhhd\" (UniqueName: \"kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd\") pod \"glance-db-create-nr42l\" (UID: \"41210913-fc01-40fc-ae9a-c11bd4e58345\") " pod="openstack/glance-db-create-nr42l" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.729269 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-nr42l" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.808502 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-t794s"] Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.846105 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-t794s" event={"ID":"a6501c99-b98e-4311-a419-5ed815080287","Type":"ContainerStarted","Data":"80e3e254a7d0f2bbc92ab7713f398f9c27bb47b4b64b81d47277cb87b82f344a"} Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.847898 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerStarted","Data":"5da9581c5841f93d450f53683dcf5e30ef31a1b137be5b1ebde5cd58b90187ab"} Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.847958 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerStarted","Data":"85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e"} Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.848318 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.848493 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="dnsmasq-dns" containerID="cri-o://8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f" gracePeriod=10 Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.905693 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.935231 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.622710448 podStartE2EDuration="3.935205867s" podCreationTimestamp="2025-10-11 07:13:16 +0000 UTC" firstStartedPulling="2025-10-11 07:13:17.740142522 +0000 UTC m=+1181.514416329" lastFinishedPulling="2025-10-11 07:13:19.052637941 +0000 UTC m=+1182.826911748" observedRunningTime="2025-10-11 07:13:19.872284167 +0000 UTC m=+1183.646557974" watchObservedRunningTime="2025-10-11 07:13:19.935205867 +0000 UTC m=+1183.709479674" Oct 11 07:13:19 crc kubenswrapper[5055]: I1011 07:13:19.956659 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ktc4n"] Oct 11 07:13:19 crc kubenswrapper[5055]: W1011 07:13:19.967159 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d54edae_40fd_42a3_b80f_d0633ea306e1.slice/crio-12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a WatchSource:0}: Error finding container 12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a: Status 404 returned error can't find the container with id 12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.185275 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-nr42l"] Oct 11 07:13:20 crc kubenswrapper[5055]: W1011 07:13:20.220355 5055 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod41210913_fc01_40fc_ae9a_c11bd4e58345.slice/crio-dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05 WatchSource:0}: Error finding container dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05: Status 404 returned error can't find the container with id dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05 Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.357457 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.458344 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc\") pod \"4cf19540-dac5-44be-a6be-7ab53926529c\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.458451 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb\") pod \"4cf19540-dac5-44be-a6be-7ab53926529c\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.458501 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcp4l\" (UniqueName: \"kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l\") pod \"4cf19540-dac5-44be-a6be-7ab53926529c\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.458597 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config\") pod \"4cf19540-dac5-44be-a6be-7ab53926529c\" (UID: \"4cf19540-dac5-44be-a6be-7ab53926529c\") " Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.465540 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l" (OuterVolumeSpecName: "kube-api-access-zcp4l") pod "4cf19540-dac5-44be-a6be-7ab53926529c" (UID: "4cf19540-dac5-44be-a6be-7ab53926529c"). InnerVolumeSpecName "kube-api-access-zcp4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.501403 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4cf19540-dac5-44be-a6be-7ab53926529c" (UID: "4cf19540-dac5-44be-a6be-7ab53926529c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.501420 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4cf19540-dac5-44be-a6be-7ab53926529c" (UID: "4cf19540-dac5-44be-a6be-7ab53926529c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.502370 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config" (OuterVolumeSpecName: "config") pod "4cf19540-dac5-44be-a6be-7ab53926529c" (UID: "4cf19540-dac5-44be-a6be-7ab53926529c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.560236 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.560287 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.560300 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcp4l\" (UniqueName: \"kubernetes.io/projected/4cf19540-dac5-44be-a6be-7ab53926529c-kube-api-access-zcp4l\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.560310 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf19540-dac5-44be-a6be-7ab53926529c-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.790698 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:13:20 crc kubenswrapper[5055]: E1011 07:13:20.792123 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="dnsmasq-dns" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.792229 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="dnsmasq-dns" Oct 11 07:13:20 crc kubenswrapper[5055]: E1011 07:13:20.792331 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="init" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.792410 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="init" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.792707 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" containerName="dnsmasq-dns" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.793849 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.809975 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.812111 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.868604 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzm74\" (UniqueName: \"kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.868735 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.868781 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.868828 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.868874 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.878447 5055 generic.go:334] "Generic (PLEG): container finished" podID="a6501c99-b98e-4311-a419-5ed815080287" containerID="413da539315ac7f5d02d327de99cd69f5a9c19ea438f0509a84e853069c698fa" exitCode=0 Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.878557 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-t794s" event={"ID":"a6501c99-b98e-4311-a419-5ed815080287","Type":"ContainerDied","Data":"413da539315ac7f5d02d327de99cd69f5a9c19ea438f0509a84e853069c698fa"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.888178 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nr42l" event={"ID":"41210913-fc01-40fc-ae9a-c11bd4e58345","Type":"ContainerDied","Data":"db55481d25e075d5a8749f7cb0165db216a25a2edbe0f1d4b9d68ef4315be0ed"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.890205 5055 generic.go:334] "Generic (PLEG): container finished" podID="41210913-fc01-40fc-ae9a-c11bd4e58345" containerID="db55481d25e075d5a8749f7cb0165db216a25a2edbe0f1d4b9d68ef4315be0ed" 
exitCode=0 Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.890326 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nr42l" event={"ID":"41210913-fc01-40fc-ae9a-c11bd4e58345","Type":"ContainerStarted","Data":"dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.898686 5055 generic.go:334] "Generic (PLEG): container finished" podID="0d54edae-40fd-42a3-b80f-d0633ea306e1" containerID="4dee5f6fda69f3982630460428d43cda49bcbb6b70d07bed8e892c763c82310f" exitCode=0 Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.898798 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktc4n" event={"ID":"0d54edae-40fd-42a3-b80f-d0633ea306e1","Type":"ContainerDied","Data":"4dee5f6fda69f3982630460428d43cda49bcbb6b70d07bed8e892c763c82310f"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.898830 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktc4n" event={"ID":"0d54edae-40fd-42a3-b80f-d0633ea306e1","Type":"ContainerStarted","Data":"12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.906582 5055 generic.go:334] "Generic (PLEG): container finished" podID="4cf19540-dac5-44be-a6be-7ab53926529c" containerID="8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f" exitCode=0 Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.906669 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.906704 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" event={"ID":"4cf19540-dac5-44be-a6be-7ab53926529c","Type":"ContainerDied","Data":"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.906733 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6b595d95-lbk7j" event={"ID":"4cf19540-dac5-44be-a6be-7ab53926529c","Type":"ContainerDied","Data":"794d25ca317dec0d9bec305d3fe5d332c3baacd685141ee1696ebd53b7d43bd8"} Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.906753 5055 scope.go:117] "RemoveContainer" containerID="8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.929220 5055 scope.go:117] "RemoveContainer" containerID="3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.971123 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.972637 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzm74\" (UniqueName: \"kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.972737 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.972894 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.973027 5055 scope.go:117] "RemoveContainer" containerID="8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.973175 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.974012 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.974213 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.974719 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.974778 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.978154 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.984205 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6b595d95-lbk7j"] Oct 11 07:13:20 crc kubenswrapper[5055]: E1011 07:13:20.990844 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f\": container with ID starting with 8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f not found: ID does not exist" containerID="8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.990891 5055 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f"} err="failed to get container status \"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f\": rpc error: code = NotFound desc = could not find container \"8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f\": container with ID starting with 8e212e3da1f8ece47b404181f01fd0a2ce843cdd7a9de2094a3d2c852fbef43f not found: ID does not exist" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.990916 5055 scope.go:117] "RemoveContainer" containerID="3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8" Oct 11 07:13:20 crc kubenswrapper[5055]: E1011 07:13:20.992295 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8\": container with ID starting with 3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8 not found: ID does not exist" containerID="3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.992526 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8"} err="failed to get container status \"3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8\": rpc error: code = NotFound desc = could not find container \"3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8\": container with ID starting with 3a00a018bcfa3777d60869c701016901e6d7429871d8e74222a7cc2988c577d8 not found: ID does not exist" Oct 11 07:13:20 crc kubenswrapper[5055]: I1011 07:13:20.996379 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzm74\" (UniqueName: \"kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74\") pod \"dnsmasq-dns-7b587f8db7-7qnbh\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.011732 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cf19540-dac5-44be-a6be-7ab53926529c" path="/var/lib/kubelet/pods/4cf19540-dac5-44be-a6be-7ab53926529c/volumes" Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.131374 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.567743 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:13:21 crc kubenswrapper[5055]: W1011 07:13:21.573366 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab2cd371_182f_4750_9439_b80dea4a9310.slice/crio-e7d2242b7073148a351af59574cfff89e46a8df720907bd2e11e6e6087496d5a WatchSource:0}: Error finding container e7d2242b7073148a351af59574cfff89e46a8df720907bd2e11e6e6087496d5a: Status 404 returned error can't find the container with id e7d2242b7073148a351af59574cfff89e46a8df720907bd2e11e6e6087496d5a Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.917664 5055 generic.go:334] "Generic (PLEG): container finished" podID="ab2cd371-182f-4750-9439-b80dea4a9310" containerID="fe4d85793b7889616858f460e97af72194d7d4116c8bb6b285c6ab826f170a81" exitCode=0 Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.917790 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" event={"ID":"ab2cd371-182f-4750-9439-b80dea4a9310","Type":"ContainerDied","Data":"fe4d85793b7889616858f460e97af72194d7d4116c8bb6b285c6ab826f170a81"} Oct 11 07:13:21 crc kubenswrapper[5055]: I1011 07:13:21.918165 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" event={"ID":"ab2cd371-182f-4750-9439-b80dea4a9310","Type":"ContainerStarted","Data":"e7d2242b7073148a351af59574cfff89e46a8df720907bd2e11e6e6087496d5a"} Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.024758 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.032320 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.036149 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-qq6pn" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.036450 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.036689 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.036940 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.043543 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.122786 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.122854 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.122920 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.122960 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.123027 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktf56\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.229826 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktf56\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230257 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230306 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230378 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230426 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.230668 5055 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.230696 5055 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230699 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.230759 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift podName:b2f75249-4d9f-44bf-af62-de6757d2326a nodeName:}" failed. No retries permitted until 2025-10-11 07:13:22.730739233 +0000 UTC m=+1186.505013040 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift") pod "swift-storage-0" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a") : configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.230959 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.231333 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.250597 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktf56\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.255527 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.377944 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-t794s" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.385071 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.415405 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-nr42l" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.440849 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52zlm\" (UniqueName: \"kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm\") pod \"0d54edae-40fd-42a3-b80f-d0633ea306e1\" (UID: \"0d54edae-40fd-42a3-b80f-d0633ea306e1\") " Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.441128 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9px8h\" (UniqueName: \"kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h\") pod \"a6501c99-b98e-4311-a419-5ed815080287\" (UID: \"a6501c99-b98e-4311-a419-5ed815080287\") " Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.444240 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h" (OuterVolumeSpecName: "kube-api-access-9px8h") pod "a6501c99-b98e-4311-a419-5ed815080287" (UID: "a6501c99-b98e-4311-a419-5ed815080287"). InnerVolumeSpecName "kube-api-access-9px8h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.444978 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm" (OuterVolumeSpecName: "kube-api-access-52zlm") pod "0d54edae-40fd-42a3-b80f-d0633ea306e1" (UID: "0d54edae-40fd-42a3-b80f-d0633ea306e1"). InnerVolumeSpecName "kube-api-access-52zlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.511055 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-zkpgz"] Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.512036 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6501c99-b98e-4311-a419-5ed815080287" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512147 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6501c99-b98e-4311-a419-5ed815080287" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.512241 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41210913-fc01-40fc-ae9a-c11bd4e58345" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512299 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="41210913-fc01-40fc-ae9a-c11bd4e58345" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.512389 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d54edae-40fd-42a3-b80f-d0633ea306e1" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512461 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d54edae-40fd-42a3-b80f-d0633ea306e1" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512691 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="41210913-fc01-40fc-ae9a-c11bd4e58345" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512816 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6501c99-b98e-4311-a419-5ed815080287" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.512939 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d54edae-40fd-42a3-b80f-d0633ea306e1" containerName="mariadb-database-create" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.514059 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.516516 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.516823 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.516548 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.520963 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-zkpgz"] Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.542082 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rhhd\" (UniqueName: \"kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd\") pod \"41210913-fc01-40fc-ae9a-c11bd4e58345\" (UID: \"41210913-fc01-40fc-ae9a-c11bd4e58345\") " Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.542705 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9px8h\" (UniqueName: \"kubernetes.io/projected/a6501c99-b98e-4311-a419-5ed815080287-kube-api-access-9px8h\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.542732 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52zlm\" (UniqueName: \"kubernetes.io/projected/0d54edae-40fd-42a3-b80f-d0633ea306e1-kube-api-access-52zlm\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.546040 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd" (OuterVolumeSpecName: "kube-api-access-6rhhd") pod "41210913-fc01-40fc-ae9a-c11bd4e58345" (UID: "41210913-fc01-40fc-ae9a-c11bd4e58345"). InnerVolumeSpecName "kube-api-access-6rhhd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644335 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9gf5\" (UniqueName: \"kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644402 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644462 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644478 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644494 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644513 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.644583 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rhhd\" (UniqueName: \"kubernetes.io/projected/41210913-fc01-40fc-ae9a-c11bd4e58345-kube-api-access-6rhhd\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.745380 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc 
kubenswrapper[5055]: I1011 07:13:22.745713 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.745866 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.746341 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.746471 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.746273 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.747138 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.747470 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.747488 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.747663 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9gf5\" (UniqueName: \"kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.747850 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: 
\"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.748746 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.748905 5055 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.748972 5055 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: E1011 07:13:22.749030 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift podName:b2f75249-4d9f-44bf-af62-de6757d2326a nodeName:}" failed. No retries permitted until 2025-10-11 07:13:23.749010434 +0000 UTC m=+1187.523284291 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift") pod "swift-storage-0" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a") : configmap "swift-ring-files" not found Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.750327 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.750444 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.763106 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9gf5\" (UniqueName: \"kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5\") pod \"swift-ring-rebalance-zkpgz\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.831909 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.932911 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" event={"ID":"ab2cd371-182f-4750-9439-b80dea4a9310","Type":"ContainerStarted","Data":"02982624e7fec032027db301139bfe8b2893cafac8ee3fb79205d98e6447ab84"} Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.934155 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.936426 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-nr42l" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.936476 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nr42l" event={"ID":"41210913-fc01-40fc-ae9a-c11bd4e58345","Type":"ContainerDied","Data":"dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05"} Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.936574 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbe91e6d88490d0c2b855822c769ed3daed1362efc5902d1adae526201eade05" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.937998 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ktc4n" event={"ID":"0d54edae-40fd-42a3-b80f-d0633ea306e1","Type":"ContainerDied","Data":"12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a"} Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.938036 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12b3833a679a5f913e2f2492d842f5a12c7be5af9a1572484e6fe22be80aef4a" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.938103 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ktc4n" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.949556 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-t794s" event={"ID":"a6501c99-b98e-4311-a419-5ed815080287","Type":"ContainerDied","Data":"80e3e254a7d0f2bbc92ab7713f398f9c27bb47b4b64b81d47277cb87b82f344a"} Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.949590 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80e3e254a7d0f2bbc92ab7713f398f9c27bb47b4b64b81d47277cb87b82f344a" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.949643 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-t794s" Oct 11 07:13:22 crc kubenswrapper[5055]: I1011 07:13:22.959198 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" podStartSLOduration=2.959182219 podStartE2EDuration="2.959182219s" podCreationTimestamp="2025-10-11 07:13:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:13:22.953680594 +0000 UTC m=+1186.727954401" watchObservedRunningTime="2025-10-11 07:13:22.959182219 +0000 UTC m=+1186.733456026" Oct 11 07:13:23 crc kubenswrapper[5055]: I1011 07:13:23.263035 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-zkpgz"] Oct 11 07:13:23 crc kubenswrapper[5055]: I1011 07:13:23.763490 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:23 crc kubenswrapper[5055]: E1011 07:13:23.763694 5055 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 11 07:13:23 crc kubenswrapper[5055]: E1011 07:13:23.763951 5055 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 11 07:13:23 crc kubenswrapper[5055]: E1011 07:13:23.764011 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift podName:b2f75249-4d9f-44bf-af62-de6757d2326a nodeName:}" failed. No retries permitted until 2025-10-11 07:13:25.763994135 +0000 UTC m=+1189.538267942 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift") pod "swift-storage-0" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a") : configmap "swift-ring-files" not found Oct 11 07:13:23 crc kubenswrapper[5055]: I1011 07:13:23.958438 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-zkpgz" event={"ID":"3038824c-7f07-4822-a8b5-a812d34adb18","Type":"ContainerStarted","Data":"f8f2d7b9c26ac324caa0efab292966137731dce9d62c0426e9178fdb3f15ee22"} Oct 11 07:13:25 crc kubenswrapper[5055]: I1011 07:13:25.799111 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:25 crc kubenswrapper[5055]: E1011 07:13:25.799339 5055 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 11 07:13:25 crc kubenswrapper[5055]: E1011 07:13:25.799575 5055 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 11 07:13:25 crc kubenswrapper[5055]: E1011 07:13:25.799743 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift podName:b2f75249-4d9f-44bf-af62-de6757d2326a nodeName:}" failed. 
No retries permitted until 2025-10-11 07:13:29.799719342 +0000 UTC m=+1193.573993149 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift") pod "swift-storage-0" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a") : configmap "swift-ring-files" not found Oct 11 07:13:27 crc kubenswrapper[5055]: I1011 07:13:27.994104 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-zkpgz" event={"ID":"3038824c-7f07-4822-a8b5-a812d34adb18","Type":"ContainerStarted","Data":"525efc8a319ad46dfee6df76645ecba02f42e0c169294d33d2d7a6d571e09063"} Oct 11 07:13:28 crc kubenswrapper[5055]: I1011 07:13:28.011178 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-zkpgz" podStartSLOduration=2.429539772 podStartE2EDuration="6.011158879s" podCreationTimestamp="2025-10-11 07:13:22 +0000 UTC" firstStartedPulling="2025-10-11 07:13:23.271504444 +0000 UTC m=+1187.045778251" lastFinishedPulling="2025-10-11 07:13:26.853123551 +0000 UTC m=+1190.627397358" observedRunningTime="2025-10-11 07:13:28.007427073 +0000 UTC m=+1191.781700880" watchObservedRunningTime="2025-10-11 07:13:28.011158879 +0000 UTC m=+1191.785432686" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.055241 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-fe2c-account-create-9pdsz"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.057478 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.066106 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.091018 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fe2c-account-create-9pdsz"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.162438 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24bp2\" (UniqueName: \"kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2\") pod \"keystone-fe2c-account-create-9pdsz\" (UID: \"ad9b1fb9-5569-4981-ba98-1c45da1ba757\") " pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.259550 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-d9e7-account-create-lgw6n"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.260629 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.264017 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24bp2\" (UniqueName: \"kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2\") pod \"keystone-fe2c-account-create-9pdsz\" (UID: \"ad9b1fb9-5569-4981-ba98-1c45da1ba757\") " pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.265003 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.268602 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d9e7-account-create-lgw6n"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.286589 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24bp2\" (UniqueName: \"kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2\") pod \"keystone-fe2c-account-create-9pdsz\" (UID: \"ad9b1fb9-5569-4981-ba98-1c45da1ba757\") " pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.365836 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qk84\" (UniqueName: \"kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84\") pod \"placement-d9e7-account-create-lgw6n\" (UID: \"b817f811-c9eb-4a75-b166-e379e0747e9f\") " pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.377419 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.456827 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-5ef3-account-create-xb4mj"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.458020 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.459758 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.466789 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5ef3-account-create-xb4mj"] Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.470032 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qk84\" (UniqueName: \"kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84\") pod \"placement-d9e7-account-create-lgw6n\" (UID: \"b817f811-c9eb-4a75-b166-e379e0747e9f\") " pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.491438 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qk84\" (UniqueName: \"kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84\") pod \"placement-d9e7-account-create-lgw6n\" (UID: \"b817f811-c9eb-4a75-b166-e379e0747e9f\") " pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.572633 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh5gq\" (UniqueName: \"kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq\") pod \"glance-5ef3-account-create-xb4mj\" (UID: \"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8\") " pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.577092 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.674205 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh5gq\" (UniqueName: \"kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq\") pod \"glance-5ef3-account-create-xb4mj\" (UID: \"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8\") " pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.690617 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh5gq\" (UniqueName: \"kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq\") pod \"glance-5ef3-account-create-xb4mj\" (UID: \"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8\") " pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.829294 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:29 crc kubenswrapper[5055]: I1011 07:13:29.878365 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:29 crc kubenswrapper[5055]: E1011 07:13:29.878596 5055 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 11 07:13:29 crc kubenswrapper[5055]: E1011 07:13:29.878619 5055 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 11 07:13:29 crc kubenswrapper[5055]: E1011 07:13:29.878670 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift podName:b2f75249-4d9f-44bf-af62-de6757d2326a nodeName:}" failed. No retries permitted until 2025-10-11 07:13:37.878650467 +0000 UTC m=+1201.652924274 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift") pod "swift-storage-0" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a") : configmap "swift-ring-files" not found Oct 11 07:13:30 crc kubenswrapper[5055]: I1011 07:13:30.012899 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-d9e7-account-create-lgw6n"] Oct 11 07:13:30 crc kubenswrapper[5055]: W1011 07:13:30.019000 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad9b1fb9_5569_4981_ba98_1c45da1ba757.slice/crio-09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795 WatchSource:0}: Error finding container 09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795: Status 404 returned error can't find the container with id 09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795 Oct 11 07:13:30 crc kubenswrapper[5055]: I1011 07:13:30.019051 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-fe2c-account-create-9pdsz"] Oct 11 07:13:30 crc kubenswrapper[5055]: W1011 07:13:30.020645 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb817f811_c9eb_4a75_b166_e379e0747e9f.slice/crio-5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f WatchSource:0}: Error finding container 5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f: Status 404 returned error can't find the container with id 5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f Oct 11 07:13:30 crc kubenswrapper[5055]: I1011 07:13:30.264410 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5ef3-account-create-xb4mj"] Oct 11 07:13:30 crc kubenswrapper[5055]: W1011 07:13:30.270853 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd331a829_dd7e_4cfb_98f4_dc2e797fdfa8.slice/crio-aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b WatchSource:0}: Error finding container aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b: Status 404 returned error can't find the container with id 
aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.021952 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5ef3-account-create-xb4mj" event={"ID":"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8","Type":"ContainerStarted","Data":"aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b"} Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.023434 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fe2c-account-create-9pdsz" event={"ID":"ad9b1fb9-5569-4981-ba98-1c45da1ba757","Type":"ContainerStarted","Data":"5f669ea0d81bde995efd9316f208e7c11cc6d09e1f2f5b5b60e815374539179a"} Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.023514 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fe2c-account-create-9pdsz" event={"ID":"ad9b1fb9-5569-4981-ba98-1c45da1ba757","Type":"ContainerStarted","Data":"09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795"} Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.024358 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d9e7-account-create-lgw6n" event={"ID":"b817f811-c9eb-4a75-b166-e379e0747e9f","Type":"ContainerStarted","Data":"5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f"} Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.132937 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.180449 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.181177 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="dnsmasq-dns" containerID="cri-o://ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa" gracePeriod=10 Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.654866 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.814878 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5rbp\" (UniqueName: \"kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp\") pod \"2ef1af2d-934c-42a6-9206-f754332e3bec\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.814940 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb\") pod \"2ef1af2d-934c-42a6-9206-f754332e3bec\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.815067 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb\") pod \"2ef1af2d-934c-42a6-9206-f754332e3bec\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.815162 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc\") pod \"2ef1af2d-934c-42a6-9206-f754332e3bec\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.815217 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config\") pod \"2ef1af2d-934c-42a6-9206-f754332e3bec\" (UID: \"2ef1af2d-934c-42a6-9206-f754332e3bec\") " Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.822820 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp" (OuterVolumeSpecName: "kube-api-access-r5rbp") pod "2ef1af2d-934c-42a6-9206-f754332e3bec" (UID: "2ef1af2d-934c-42a6-9206-f754332e3bec"). InnerVolumeSpecName "kube-api-access-r5rbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.863815 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config" (OuterVolumeSpecName: "config") pod "2ef1af2d-934c-42a6-9206-f754332e3bec" (UID: "2ef1af2d-934c-42a6-9206-f754332e3bec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.864261 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ef1af2d-934c-42a6-9206-f754332e3bec" (UID: "2ef1af2d-934c-42a6-9206-f754332e3bec"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.866318 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ef1af2d-934c-42a6-9206-f754332e3bec" (UID: "2ef1af2d-934c-42a6-9206-f754332e3bec"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.866965 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ef1af2d-934c-42a6-9206-f754332e3bec" (UID: "2ef1af2d-934c-42a6-9206-f754332e3bec"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.916886 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.916912 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5rbp\" (UniqueName: \"kubernetes.io/projected/2ef1af2d-934c-42a6-9206-f754332e3bec-kube-api-access-r5rbp\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.916925 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.916935 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:31 crc kubenswrapper[5055]: I1011 07:13:31.916944 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ef1af2d-934c-42a6-9206-f754332e3bec-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.033012 5055 generic.go:334] "Generic (PLEG): container finished" podID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerID="ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa" exitCode=0 Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.033221 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" event={"ID":"2ef1af2d-934c-42a6-9206-f754332e3bec","Type":"ContainerDied","Data":"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa"} Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.033844 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" event={"ID":"2ef1af2d-934c-42a6-9206-f754332e3bec","Type":"ContainerDied","Data":"4fa8ff6e5d3febd0848008abf884a769f42dbbb5ae963e0655a21ef1d7dd0085"} Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.033947 5055 scope.go:117] "RemoveContainer" containerID="ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.033352 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-dc9d58d7-f6scn" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.036907 5055 generic.go:334] "Generic (PLEG): container finished" podID="b817f811-c9eb-4a75-b166-e379e0747e9f" containerID="12e5b5a9e7355897c5a5e7b56f13c98d9c7c5a690e2053ad0ed2cfdfabeddf21" exitCode=0 Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.036992 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d9e7-account-create-lgw6n" event={"ID":"b817f811-c9eb-4a75-b166-e379e0747e9f","Type":"ContainerDied","Data":"12e5b5a9e7355897c5a5e7b56f13c98d9c7c5a690e2053ad0ed2cfdfabeddf21"} Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.040693 5055 generic.go:334] "Generic (PLEG): container finished" podID="d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" containerID="a2009069b1b6468e98ce6fd276203569c2cc14f3b2151d6fb53c24bf78249492" exitCode=0 Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.040800 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5ef3-account-create-xb4mj" event={"ID":"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8","Type":"ContainerDied","Data":"a2009069b1b6468e98ce6fd276203569c2cc14f3b2151d6fb53c24bf78249492"} Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.046476 5055 generic.go:334] "Generic (PLEG): container finished" podID="ad9b1fb9-5569-4981-ba98-1c45da1ba757" containerID="5f669ea0d81bde995efd9316f208e7c11cc6d09e1f2f5b5b60e815374539179a" exitCode=0 Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.046525 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fe2c-account-create-9pdsz" event={"ID":"ad9b1fb9-5569-4981-ba98-1c45da1ba757","Type":"ContainerDied","Data":"5f669ea0d81bde995efd9316f208e7c11cc6d09e1f2f5b5b60e815374539179a"} Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.073812 5055 scope.go:117] "RemoveContainer" containerID="fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.109408 5055 scope.go:117] "RemoveContainer" containerID="ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.109674 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:32 crc kubenswrapper[5055]: E1011 07:13:32.111165 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa\": container with ID starting with ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa not found: ID does not exist" containerID="ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.111267 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa"} err="failed to get container status \"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa\": rpc error: code = NotFound desc = could not find container \"ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa\": container with ID starting with ee0eea2e23890f5c17d8438cf0af07e8437b14463c804fcb7ea74011aceb39fa not found: ID does not exist" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.111360 5055 scope.go:117] "RemoveContainer" containerID="fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484" Oct 11 
07:13:32 crc kubenswrapper[5055]: E1011 07:13:32.111924 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484\": container with ID starting with fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484 not found: ID does not exist" containerID="fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.112025 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484"} err="failed to get container status \"fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484\": rpc error: code = NotFound desc = could not find container \"fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484\": container with ID starting with fd76bf1a8e5506f0bfe0a0cbbad248adbb9a6c897c9e4b930f64b3104894c484 not found: ID does not exist" Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.115378 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-dc9d58d7-f6scn"] Oct 11 07:13:32 crc kubenswrapper[5055]: I1011 07:13:32.356093 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.004344 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" path="/var/lib/kubelet/pods/2ef1af2d-934c-42a6-9206-f754332e3bec/volumes" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.507620 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.520474 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.527284 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.648459 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wh5gq\" (UniqueName: \"kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq\") pod \"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8\" (UID: \"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8\") " Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.648603 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qk84\" (UniqueName: \"kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84\") pod \"b817f811-c9eb-4a75-b166-e379e0747e9f\" (UID: \"b817f811-c9eb-4a75-b166-e379e0747e9f\") " Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.648659 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24bp2\" (UniqueName: \"kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2\") pod \"ad9b1fb9-5569-4981-ba98-1c45da1ba757\" (UID: \"ad9b1fb9-5569-4981-ba98-1c45da1ba757\") " Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.653147 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2" (OuterVolumeSpecName: "kube-api-access-24bp2") pod "ad9b1fb9-5569-4981-ba98-1c45da1ba757" (UID: "ad9b1fb9-5569-4981-ba98-1c45da1ba757"). InnerVolumeSpecName "kube-api-access-24bp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.653387 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq" (OuterVolumeSpecName: "kube-api-access-wh5gq") pod "d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" (UID: "d331a829-dd7e-4cfb-98f4-dc2e797fdfa8"). InnerVolumeSpecName "kube-api-access-wh5gq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.653974 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84" (OuterVolumeSpecName: "kube-api-access-8qk84") pod "b817f811-c9eb-4a75-b166-e379e0747e9f" (UID: "b817f811-c9eb-4a75-b166-e379e0747e9f"). InnerVolumeSpecName "kube-api-access-8qk84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.750888 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wh5gq\" (UniqueName: \"kubernetes.io/projected/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8-kube-api-access-wh5gq\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.750915 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qk84\" (UniqueName: \"kubernetes.io/projected/b817f811-c9eb-4a75-b166-e379e0747e9f-kube-api-access-8qk84\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:33 crc kubenswrapper[5055]: I1011 07:13:33.750925 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24bp2\" (UniqueName: \"kubernetes.io/projected/ad9b1fb9-5569-4981-ba98-1c45da1ba757-kube-api-access-24bp2\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.062511 5055 generic.go:334] "Generic (PLEG): container finished" podID="3038824c-7f07-4822-a8b5-a812d34adb18" containerID="525efc8a319ad46dfee6df76645ecba02f42e0c169294d33d2d7a6d571e09063" exitCode=0 Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.062573 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-zkpgz" event={"ID":"3038824c-7f07-4822-a8b5-a812d34adb18","Type":"ContainerDied","Data":"525efc8a319ad46dfee6df76645ecba02f42e0c169294d33d2d7a6d571e09063"} Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.065279 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5ef3-account-create-xb4mj" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.065275 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5ef3-account-create-xb4mj" event={"ID":"d331a829-dd7e-4cfb-98f4-dc2e797fdfa8","Type":"ContainerDied","Data":"aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b"} Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.065421 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aac9fe5aaaad6a1c089916af7833e3fa90906c2b2ea1cb7c7b387873b679292b" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.067064 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-fe2c-account-create-9pdsz" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.067081 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-fe2c-account-create-9pdsz" event={"ID":"ad9b1fb9-5569-4981-ba98-1c45da1ba757","Type":"ContainerDied","Data":"09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795"} Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.067101 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09cce37b1643f8aeba644d67307883e4faed535170f4b99bf3aee94053036795" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.068406 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-d9e7-account-create-lgw6n" event={"ID":"b817f811-c9eb-4a75-b166-e379e0747e9f","Type":"ContainerDied","Data":"5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f"} Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.068431 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5664603ab093a99c8a476132e04653c6630caec321ddf437b506d50b424f1f8f" Oct 11 07:13:34 crc kubenswrapper[5055]: I1011 07:13:34.068467 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-d9e7-account-create-lgw6n" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.370410 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.477982 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478042 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478100 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478117 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478143 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9gf5\" (UniqueName: \"kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478175 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle\") pod 
\"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.478848 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.479040 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices\") pod \"3038824c-7f07-4822-a8b5-a812d34adb18\" (UID: \"3038824c-7f07-4822-a8b5-a812d34adb18\") " Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.479428 5055 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/3038824c-7f07-4822-a8b5-a812d34adb18-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.479450 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.482640 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5" (OuterVolumeSpecName: "kube-api-access-d9gf5") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "kube-api-access-d9gf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.486227 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.497461 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts" (OuterVolumeSpecName: "scripts") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.501588 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.503975 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "3038824c-7f07-4822-a8b5-a812d34adb18" (UID: "3038824c-7f07-4822-a8b5-a812d34adb18"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580517 5055 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-ring-data-devices\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580548 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3038824c-7f07-4822-a8b5-a812d34adb18-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580558 5055 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-swiftconf\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580565 5055 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-dispersionconf\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580574 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9gf5\" (UniqueName: \"kubernetes.io/projected/3038824c-7f07-4822-a8b5-a812d34adb18-kube-api-access-d9gf5\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:35 crc kubenswrapper[5055]: I1011 07:13:35.580585 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3038824c-7f07-4822-a8b5-a812d34adb18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:36 crc kubenswrapper[5055]: I1011 07:13:36.085702 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-zkpgz" event={"ID":"3038824c-7f07-4822-a8b5-a812d34adb18","Type":"ContainerDied","Data":"f8f2d7b9c26ac324caa0efab292966137731dce9d62c0426e9178fdb3f15ee22"} Oct 11 07:13:36 crc kubenswrapper[5055]: I1011 07:13:36.086015 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8f2d7b9c26ac324caa0efab292966137731dce9d62c0426e9178fdb3f15ee22" Oct 11 07:13:36 crc kubenswrapper[5055]: I1011 07:13:36.085880 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-zkpgz" Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.093991 5055 generic.go:334] "Generic (PLEG): container finished" podID="6baacc00-a270-4662-ba67-aad18287df2c" containerID="f0982b0e0f3eeaf7aa5484768477b7a1133fccf88ef1ea8df59f16ddad4fc302" exitCode=0 Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.094081 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerDied","Data":"f0982b0e0f3eeaf7aa5484768477b7a1133fccf88ef1ea8df59f16ddad4fc302"} Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.095391 5055 generic.go:334] "Generic (PLEG): container finished" podID="80597a79-e3fd-41cd-b035-a35494775fcb" containerID="327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e" exitCode=0 Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.095419 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerDied","Data":"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e"} Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.922747 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:37 crc kubenswrapper[5055]: I1011 07:13:37.929118 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"swift-storage-0\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " pod="openstack/swift-storage-0" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.010352 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.106011 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerStarted","Data":"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61"} Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.106346 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.110725 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerStarted","Data":"afc6ef0c5aad5fc6c62e67db33d9a72dc22d3677a055603cfeadcd9d74609515"} Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.110954 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.133675 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371972.721119 podStartE2EDuration="1m4.133656563s" podCreationTimestamp="2025-10-11 07:12:34 +0000 UTC" firstStartedPulling="2025-10-11 07:12:36.736078964 +0000 UTC m=+1140.510352771" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:13:38.130053881 +0000 UTC m=+1201.904327708" watchObservedRunningTime="2025-10-11 07:13:38.133656563 +0000 UTC m=+1201.907930360" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.154070 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.019107896 podStartE2EDuration="1m4.15404933s" podCreationTimestamp="2025-10-11 07:12:34 +0000 UTC" firstStartedPulling="2025-10-11 07:12:36.079419709 +0000 UTC m=+1139.853693516" lastFinishedPulling="2025-10-11 07:13:03.214361143 +0000 UTC m=+1166.988634950" observedRunningTime="2025-10-11 07:13:38.150373536 +0000 UTC m=+1201.924647343" watchObservedRunningTime="2025-10-11 07:13:38.15404933 +0000 UTC m=+1201.928323137" Oct 11 07:13:38 crc kubenswrapper[5055]: I1011 07:13:38.615313 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.118822 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"c09532594bf617b6a38bf1a26c2b619ece7604f6ac71de6f1d8a39bab11a43fc"} Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.529717 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wjshb"] Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530040 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="init" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530055 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="init" Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530072 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b817f811-c9eb-4a75-b166-e379e0747e9f" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530079 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b817f811-c9eb-4a75-b166-e379e0747e9f" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530089 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="dnsmasq-dns" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530095 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="dnsmasq-dns" Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530116 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530121 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530132 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3038824c-7f07-4822-a8b5-a812d34adb18" containerName="swift-ring-rebalance" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530142 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3038824c-7f07-4822-a8b5-a812d34adb18" containerName="swift-ring-rebalance" Oct 11 07:13:39 crc kubenswrapper[5055]: E1011 07:13:39.530154 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad9b1fb9-5569-4981-ba98-1c45da1ba757" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530161 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad9b1fb9-5569-4981-ba98-1c45da1ba757" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530301 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530335 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad9b1fb9-5569-4981-ba98-1c45da1ba757" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530352 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b817f811-c9eb-4a75-b166-e379e0747e9f" containerName="mariadb-account-create" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530365 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3038824c-7f07-4822-a8b5-a812d34adb18" containerName="swift-ring-rebalance" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530371 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ef1af2d-934c-42a6-9206-f754332e3bec" containerName="dnsmasq-dns" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.530920 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.533599 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.533604 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-msbtk" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.540166 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wjshb"] Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.650728 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt6q5\" (UniqueName: \"kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.650844 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.650884 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.650917 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.752699 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt6q5\" (UniqueName: \"kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.752804 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.752842 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.752904 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data\") pod 
\"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.758234 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.759159 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.771047 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt6q5\" (UniqueName: \"kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.771319 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data\") pod \"glance-db-sync-wjshb\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:39 crc kubenswrapper[5055]: I1011 07:13:39.890872 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wjshb" Oct 11 07:13:40 crc kubenswrapper[5055]: I1011 07:13:40.131113 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f"} Oct 11 07:13:40 crc kubenswrapper[5055]: I1011 07:13:40.402200 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-2l5hh" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" probeResult="failure" output=< Oct 11 07:13:40 crc kubenswrapper[5055]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Oct 11 07:13:40 crc kubenswrapper[5055]: > Oct 11 07:13:40 crc kubenswrapper[5055]: I1011 07:13:40.429318 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wjshb"] Oct 11 07:13:40 crc kubenswrapper[5055]: W1011 07:13:40.439968 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53b11174_ab68_45f3_8933_522549982191.slice/crio-204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357 WatchSource:0}: Error finding container 204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357: Status 404 returned error can't find the container with id 204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357 Oct 11 07:13:41 crc kubenswrapper[5055]: I1011 07:13:41.141173 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"b49bbf96e1ad1276ddfb0167a4e342e71a2d3248b8dd30e171b623fe16b3e5b3"} Oct 11 07:13:41 crc kubenswrapper[5055]: I1011 
07:13:41.141222 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"169a3663bf7e265b73d12b362a875432f15995d044bb153d925d7a1f2aded521"} Oct 11 07:13:41 crc kubenswrapper[5055]: I1011 07:13:41.141232 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"56af820548674816735daf22fa48f4a5a08266e6d8ffee8ee41bf2dc35f87bc1"} Oct 11 07:13:41 crc kubenswrapper[5055]: I1011 07:13:41.142437 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wjshb" event={"ID":"53b11174-ab68-45f3-8933-522549982191","Type":"ContainerStarted","Data":"204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357"} Oct 11 07:13:44 crc kubenswrapper[5055]: I1011 07:13:44.167250 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7"} Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.181303 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"29c5e0db8a9d4f0f2dc6b72ae8af6d9ada7256824853969407ab923a9710b4ca"} Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.181350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"52170fd074acd4d0b541fe66faa6dbfd0a1f3e12dbda56b05f7ebf193e4489b4"} Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.181361 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"07ab4219d9bf5b7769a004dfff97e17520e92f8e30791238e6e136cf29d5a8e7"} Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.378738 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.380812 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.395635 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-2l5hh" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" probeResult="failure" output=< Oct 11 07:13:45 crc kubenswrapper[5055]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Oct 11 07:13:45 crc kubenswrapper[5055]: > Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.598463 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2l5hh-config-tstxf"] Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.599425 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.603200 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.652817 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2l5hh-config-tstxf"] Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768361 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768450 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768491 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768509 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768532 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf4dk\" (UniqueName: \"kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.768602 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870157 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870221 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run\") pod 
\"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870292 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870336 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870357 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870387 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf4dk\" (UniqueName: \"kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870687 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870779 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.870844 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.871361 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.872629 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts\") pod 
\"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.889476 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf4dk\" (UniqueName: \"kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk\") pod \"ovn-controller-2l5hh-config-tstxf\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:45 crc kubenswrapper[5055]: I1011 07:13:45.919208 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:46 crc kubenswrapper[5055]: I1011 07:13:46.367673 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2l5hh-config-tstxf"] Oct 11 07:13:48 crc kubenswrapper[5055]: W1011 07:13:48.273085 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91470ef3_c560_4d04_961b_19af67e97cc6.slice/crio-5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd WatchSource:0}: Error finding container 5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd: Status 404 returned error can't find the container with id 5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd Oct 11 07:13:49 crc kubenswrapper[5055]: I1011 07:13:49.212909 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh-config-tstxf" event={"ID":"91470ef3-c560-4d04-961b-19af67e97cc6","Type":"ContainerStarted","Data":"5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd"} Oct 11 07:13:50 crc kubenswrapper[5055]: I1011 07:13:50.456671 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-2l5hh" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" probeResult="failure" output=< Oct 11 07:13:50 crc kubenswrapper[5055]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Oct 11 07:13:50 crc kubenswrapper[5055]: > Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.257490 5055 generic.go:334] "Generic (PLEG): container finished" podID="91470ef3-c560-4d04-961b-19af67e97cc6" containerID="6451ddf3c93d11d13c19726cabdb7eab83c68b055d4ea78ef5eba294d82d05d6" exitCode=0 Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.257636 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh-config-tstxf" event={"ID":"91470ef3-c560-4d04-961b-19af67e97cc6","Type":"ContainerDied","Data":"6451ddf3c93d11d13c19726cabdb7eab83c68b055d4ea78ef5eba294d82d05d6"} Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.262512 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"98a3bd9053451cb0809632f4b67e138c0b75c3d90227f09fcbaeb9c60c6b6bb6"} Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.262543 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"99e83bf3a8eb2b7367fbf738da227780a1807885d63f0d0e27ddf82536f1f23f"} Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.262554 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b"} Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.264137 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wjshb" event={"ID":"53b11174-ab68-45f3-8933-522549982191","Type":"ContainerStarted","Data":"a0a0abf70d2c007bb1c49c14a15f4843ffb20deb9122e77b28327e5e2a26d9c6"} Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.293712 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wjshb" podStartSLOduration=2.15000551 podStartE2EDuration="16.293688056s" podCreationTimestamp="2025-10-11 07:13:39 +0000 UTC" firstStartedPulling="2025-10-11 07:13:40.442310741 +0000 UTC m=+1204.216584548" lastFinishedPulling="2025-10-11 07:13:54.585993287 +0000 UTC m=+1218.360267094" observedRunningTime="2025-10-11 07:13:55.289439356 +0000 UTC m=+1219.063713163" watchObservedRunningTime="2025-10-11 07:13:55.293688056 +0000 UTC m=+1219.067961863" Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.391203 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-2l5hh" Oct 11 07:13:55 crc kubenswrapper[5055]: I1011 07:13:55.547918 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.159948 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.278718 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"06502cb3633b89b873682e6009f7ef8a153676dfb10f0635ff187c1ee1f7afb6"} Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.279043 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"ec154ac9d6d4c663a8d3edda9d0ce209dd40a1836be6f8424d6593198e1668ad"} Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.279056 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"f3423b44a9f0fea83f6193fc9009ffe5ed847e103301a15f805c18dd8d170f9e"} Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.279067 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerStarted","Data":"ccd2a2a490675a60a807b626b3856bccab72588539a3eb4bd4bf5dcf6d915355"} Oct 11 07:13:56 crc kubenswrapper[5055]: I1011 07:13:56.332736 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=20.412689071 podStartE2EDuration="36.332716677s" podCreationTimestamp="2025-10-11 07:13:20 +0000 UTC" firstStartedPulling="2025-10-11 07:13:38.622663176 +0000 UTC m=+1202.396936993" lastFinishedPulling="2025-10-11 07:13:54.542690792 +0000 UTC m=+1218.316964599" observedRunningTime="2025-10-11 07:13:56.318596708 +0000 UTC m=+1220.092870515" watchObservedRunningTime="2025-10-11 07:13:56.332716677 +0000 UTC m=+1220.106990484" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.601748 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.650002 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:13:57 crc kubenswrapper[5055]: E1011 07:13:56.650315 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91470ef3-c560-4d04-961b-19af67e97cc6" containerName="ovn-config" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.650326 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="91470ef3-c560-4d04-961b-19af67e97cc6" containerName="ovn-config" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.650504 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="91470ef3-c560-4d04-961b-19af67e97cc6" containerName="ovn-config" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.655791 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.659941 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.674239 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753262 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753333 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753398 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753463 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753469 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run" (OuterVolumeSpecName: "var-run") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753518 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vf4dk\" (UniqueName: \"kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753551 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753589 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts\") pod \"91470ef3-c560-4d04-961b-19af67e97cc6\" (UID: \"91470ef3-c560-4d04-961b-19af67e97cc6\") " Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753672 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753808 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w4t2\" (UniqueName: \"kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753842 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.753864 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754039 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754075 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") 
" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754181 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754278 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754529 5055 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754545 5055 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754558 5055 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754570 5055 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/91470ef3-c560-4d04-961b-19af67e97cc6-var-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.754590 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts" (OuterVolumeSpecName: "scripts") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.769263 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk" (OuterVolumeSpecName: "kube-api-access-vf4dk") pod "91470ef3-c560-4d04-961b-19af67e97cc6" (UID: "91470ef3-c560-4d04-961b-19af67e97cc6"). InnerVolumeSpecName "kube-api-access-vf4dk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.855739 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w4t2\" (UniqueName: \"kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856103 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856137 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856204 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856230 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856274 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856353 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/91470ef3-c560-4d04-961b-19af67e97cc6-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.856367 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vf4dk\" (UniqueName: \"kubernetes.io/projected/91470ef3-c560-4d04-961b-19af67e97cc6-kube-api-access-vf4dk\") on node \"crc\" DevicePath \"\"" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.857016 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.857052 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc\") pod 
\"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.857249 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.857299 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.857847 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.874238 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w4t2\" (UniqueName: \"kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2\") pod \"dnsmasq-dns-564965cbfc-s2h6b\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:56.979199 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.286612 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh-config-tstxf" event={"ID":"91470ef3-c560-4d04-961b-19af67e97cc6","Type":"ContainerDied","Data":"5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd"} Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.286892 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f956990629aa45f7e470afca5285e254652c3cff02c38e9c873fcbf99458dcd" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.286632 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2l5hh-config-tstxf" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.381560 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-xsnj4"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.382588 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xsnj4" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.396139 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xsnj4"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.484237 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-rwbjm"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.485486 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rwbjm" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.494254 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rwbjm"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.501758 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:13:57 crc kubenswrapper[5055]: W1011 07:13:57.524202 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fa844dd_aab4_4d83_99f9_55a21616df97.slice/crio-3b9a726998e25d5a7716e8a81c14878ec5a6186af2d7436475a00d484790c20c WatchSource:0}: Error finding container 3b9a726998e25d5a7716e8a81c14878ec5a6186af2d7436475a00d484790c20c: Status 404 returned error can't find the container with id 3b9a726998e25d5a7716e8a81c14878ec5a6186af2d7436475a00d484790c20c Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.565148 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwdd8\" (UniqueName: \"kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8\") pod \"cinder-db-create-xsnj4\" (UID: \"7902df61-9a6f-4f11-bb2f-95843e9df7a5\") " pod="openstack/cinder-db-create-xsnj4" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.608464 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-s265b"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.609557 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s265b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.668626 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr8bb\" (UniqueName: \"kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb\") pod \"barbican-db-create-rwbjm\" (UID: \"e8dcd064-841e-43a0-a210-33ee23f04450\") " pod="openstack/barbican-db-create-rwbjm" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.668697 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwdd8\" (UniqueName: \"kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8\") pod \"cinder-db-create-xsnj4\" (UID: \"7902df61-9a6f-4f11-bb2f-95843e9df7a5\") " pod="openstack/cinder-db-create-xsnj4" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.686512 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-s265b"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.699837 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwdd8\" (UniqueName: \"kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8\") pod \"cinder-db-create-xsnj4\" (UID: \"7902df61-9a6f-4f11-bb2f-95843e9df7a5\") " pod="openstack/cinder-db-create-xsnj4" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.700395 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xsnj4" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.718377 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-gwmkb"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.719576 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.730482 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.730707 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.730822 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.730922 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v2rpk" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.738164 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gwmkb"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.770628 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr8bb\" (UniqueName: \"kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb\") pod \"barbican-db-create-rwbjm\" (UID: \"e8dcd064-841e-43a0-a210-33ee23f04450\") " pod="openstack/barbican-db-create-rwbjm" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.770720 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdw45\" (UniqueName: \"kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45\") pod \"neutron-db-create-s265b\" (UID: \"f76f706d-47b9-43df-94f1-80767e21f5c9\") " pod="openstack/neutron-db-create-s265b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.825108 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr8bb\" (UniqueName: \"kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb\") pod \"barbican-db-create-rwbjm\" (UID: \"e8dcd064-841e-43a0-a210-33ee23f04450\") " pod="openstack/barbican-db-create-rwbjm" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.843412 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-2l5hh-config-tstxf"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.857383 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-2l5hh-config-tstxf"] Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.873318 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phgg9\" (UniqueName: \"kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.873451 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdw45\" (UniqueName: \"kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45\") pod \"neutron-db-create-s265b\" (UID: \"f76f706d-47b9-43df-94f1-80767e21f5c9\") " pod="openstack/neutron-db-create-s265b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.873529 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data\") pod \"keystone-db-sync-gwmkb\" (UID: 
\"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.873545 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.906896 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdw45\" (UniqueName: \"kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45\") pod \"neutron-db-create-s265b\" (UID: \"f76f706d-47b9-43df-94f1-80767e21f5c9\") " pod="openstack/neutron-db-create-s265b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.952791 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s265b" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.976004 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.976060 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.976122 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phgg9\" (UniqueName: \"kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:57 crc kubenswrapper[5055]: I1011 07:13:57.991438 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.004947 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phgg9\" (UniqueName: \"kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.017569 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle\") pod \"keystone-db-sync-gwmkb\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.104530 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rwbjm" Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.240328 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.294685 5055 generic.go:334] "Generic (PLEG): container finished" podID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerID="e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae" exitCode=0 Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.294730 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" event={"ID":"6fa844dd-aab4-4d83-99f9-55a21616df97","Type":"ContainerDied","Data":"e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae"} Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.294755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" event={"ID":"6fa844dd-aab4-4d83-99f9-55a21616df97","Type":"ContainerStarted","Data":"3b9a726998e25d5a7716e8a81c14878ec5a6186af2d7436475a00d484790c20c"} Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.393852 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xsnj4"] Oct 11 07:13:58 crc kubenswrapper[5055]: W1011 07:13:58.395876 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7902df61_9a6f_4f11_bb2f_95843e9df7a5.slice/crio-cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670 WatchSource:0}: Error finding container cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670: Status 404 returned error can't find the container with id cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670 Oct 11 07:13:58 crc kubenswrapper[5055]: W1011 07:13:58.537860 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf76f706d_47b9_43df_94f1_80767e21f5c9.slice/crio-e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff WatchSource:0}: Error finding container e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff: Status 404 returned error can't find the container with id e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.540246 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-s265b"] Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.611106 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rwbjm"] Oct 11 07:13:58 crc kubenswrapper[5055]: W1011 07:13:58.679924 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8dcd064_841e_43a0_a210_33ee23f04450.slice/crio-dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868 WatchSource:0}: Error finding container dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868: Status 404 returned error can't find the container with id dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868 Oct 11 07:13:58 crc kubenswrapper[5055]: I1011 07:13:58.740077 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gwmkb"] Oct 11 07:13:58 crc kubenswrapper[5055]: W1011 07:13:58.755923 5055 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4dcbfb53_b641_4aa7_91e3_43225acb15f4.slice/crio-3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26 WatchSource:0}: Error finding container 3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26: Status 404 returned error can't find the container with id 3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26 Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.005272 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91470ef3-c560-4d04-961b-19af67e97cc6" path="/var/lib/kubelet/pods/91470ef3-c560-4d04-961b-19af67e97cc6/volumes" Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.304518 5055 generic.go:334] "Generic (PLEG): container finished" podID="7902df61-9a6f-4f11-bb2f-95843e9df7a5" containerID="4d0f46837749fa8e6cb46bd60b4aed62acdaa53ca62154c632c0c613c678b675" exitCode=0 Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.304558 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xsnj4" event={"ID":"7902df61-9a6f-4f11-bb2f-95843e9df7a5","Type":"ContainerDied","Data":"4d0f46837749fa8e6cb46bd60b4aed62acdaa53ca62154c632c0c613c678b675"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.304595 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xsnj4" event={"ID":"7902df61-9a6f-4f11-bb2f-95843e9df7a5","Type":"ContainerStarted","Data":"cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.306394 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" event={"ID":"6fa844dd-aab4-4d83-99f9-55a21616df97","Type":"ContainerStarted","Data":"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.306513 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.308666 5055 generic.go:334] "Generic (PLEG): container finished" podID="f76f706d-47b9-43df-94f1-80767e21f5c9" containerID="c875a88f3743d3993dd9a3e16831b0a6d030be3ef2be3e9f3ed67168c54bcd11" exitCode=0 Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.308759 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s265b" event={"ID":"f76f706d-47b9-43df-94f1-80767e21f5c9","Type":"ContainerDied","Data":"c875a88f3743d3993dd9a3e16831b0a6d030be3ef2be3e9f3ed67168c54bcd11"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.308792 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s265b" event={"ID":"f76f706d-47b9-43df-94f1-80767e21f5c9","Type":"ContainerStarted","Data":"e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.310024 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gwmkb" event={"ID":"4dcbfb53-b641-4aa7-91e3-43225acb15f4","Type":"ContainerStarted","Data":"3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.311335 5055 generic.go:334] "Generic (PLEG): container finished" podID="e8dcd064-841e-43a0-a210-33ee23f04450" containerID="671b6200e8eab05242809aab3fde8beb4a2d22eb5c3a00ba72673a18bd57b820" exitCode=0 Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.311398 5055 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwbjm" event={"ID":"e8dcd064-841e-43a0-a210-33ee23f04450","Type":"ContainerDied","Data":"671b6200e8eab05242809aab3fde8beb4a2d22eb5c3a00ba72673a18bd57b820"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.311417 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwbjm" event={"ID":"e8dcd064-841e-43a0-a210-33ee23f04450","Type":"ContainerStarted","Data":"dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868"} Oct 11 07:13:59 crc kubenswrapper[5055]: I1011 07:13:59.354154 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" podStartSLOduration=3.354132308 podStartE2EDuration="3.354132308s" podCreationTimestamp="2025-10-11 07:13:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:13:59.351263607 +0000 UTC m=+1223.125537414" watchObservedRunningTime="2025-10-11 07:13:59.354132308 +0000 UTC m=+1223.128406125" Oct 11 07:14:00 crc kubenswrapper[5055]: I1011 07:14:00.684159 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rwbjm" Oct 11 07:14:00 crc kubenswrapper[5055]: I1011 07:14:00.820739 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nr8bb\" (UniqueName: \"kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb\") pod \"e8dcd064-841e-43a0-a210-33ee23f04450\" (UID: \"e8dcd064-841e-43a0-a210-33ee23f04450\") " Oct 11 07:14:00 crc kubenswrapper[5055]: I1011 07:14:00.826324 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb" (OuterVolumeSpecName: "kube-api-access-nr8bb") pod "e8dcd064-841e-43a0-a210-33ee23f04450" (UID: "e8dcd064-841e-43a0-a210-33ee23f04450"). InnerVolumeSpecName "kube-api-access-nr8bb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:00 crc kubenswrapper[5055]: I1011 07:14:00.922923 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nr8bb\" (UniqueName: \"kubernetes.io/projected/e8dcd064-841e-43a0-a210-33ee23f04450-kube-api-access-nr8bb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:01 crc kubenswrapper[5055]: I1011 07:14:01.333835 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rwbjm" event={"ID":"e8dcd064-841e-43a0-a210-33ee23f04450","Type":"ContainerDied","Data":"dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868"} Oct 11 07:14:01 crc kubenswrapper[5055]: I1011 07:14:01.333880 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc2e10f67a78d5efbed627821ccf022ee06d4c5404dd6e5baa19e2af83f36868" Oct 11 07:14:01 crc kubenswrapper[5055]: I1011 07:14:01.333947 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rwbjm" Oct 11 07:14:02 crc kubenswrapper[5055]: I1011 07:14:02.359914 5055 generic.go:334] "Generic (PLEG): container finished" podID="53b11174-ab68-45f3-8933-522549982191" containerID="a0a0abf70d2c007bb1c49c14a15f4843ffb20deb9122e77b28327e5e2a26d9c6" exitCode=0 Oct 11 07:14:02 crc kubenswrapper[5055]: I1011 07:14:02.359988 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wjshb" event={"ID":"53b11174-ab68-45f3-8933-522549982191","Type":"ContainerDied","Data":"a0a0abf70d2c007bb1c49c14a15f4843ffb20deb9122e77b28327e5e2a26d9c6"} Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.133092 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xsnj4" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.141105 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s265b" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.261896 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwdd8\" (UniqueName: \"kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8\") pod \"7902df61-9a6f-4f11-bb2f-95843e9df7a5\" (UID: \"7902df61-9a6f-4f11-bb2f-95843e9df7a5\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.262090 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdw45\" (UniqueName: \"kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45\") pod \"f76f706d-47b9-43df-94f1-80767e21f5c9\" (UID: \"f76f706d-47b9-43df-94f1-80767e21f5c9\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.266138 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45" (OuterVolumeSpecName: "kube-api-access-xdw45") pod "f76f706d-47b9-43df-94f1-80767e21f5c9" (UID: "f76f706d-47b9-43df-94f1-80767e21f5c9"). InnerVolumeSpecName "kube-api-access-xdw45". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.266194 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8" (OuterVolumeSpecName: "kube-api-access-gwdd8") pod "7902df61-9a6f-4f11-bb2f-95843e9df7a5" (UID: "7902df61-9a6f-4f11-bb2f-95843e9df7a5"). InnerVolumeSpecName "kube-api-access-gwdd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.363555 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdw45\" (UniqueName: \"kubernetes.io/projected/f76f706d-47b9-43df-94f1-80767e21f5c9-kube-api-access-xdw45\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.363855 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwdd8\" (UniqueName: \"kubernetes.io/projected/7902df61-9a6f-4f11-bb2f-95843e9df7a5-kube-api-access-gwdd8\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.367569 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gwmkb" event={"ID":"4dcbfb53-b641-4aa7-91e3-43225acb15f4","Type":"ContainerStarted","Data":"2bbd19f08b9f56aadd0332921f0d27aafe6bbf0e9122095351a0b2e167b99652"} Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.369970 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xsnj4" event={"ID":"7902df61-9a6f-4f11-bb2f-95843e9df7a5","Type":"ContainerDied","Data":"cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670"} Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.370004 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cabd6d2fc571b9fc4cedc65897c925b0c476e0bfcdfa9c37789e381bbe8a0670" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.369984 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xsnj4" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.371408 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-s265b" event={"ID":"f76f706d-47b9-43df-94f1-80767e21f5c9","Type":"ContainerDied","Data":"e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff"} Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.371435 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e75f4769e0a71c2288f5c86a042f1f217e874c9007de51f663f9b7ddc528dcff" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.371540 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-s265b" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.398511 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-gwmkb" podStartSLOduration=2.247608333 podStartE2EDuration="6.398492883s" podCreationTimestamp="2025-10-11 07:13:57 +0000 UTC" firstStartedPulling="2025-10-11 07:13:58.775322914 +0000 UTC m=+1222.549596721" lastFinishedPulling="2025-10-11 07:14:02.926207464 +0000 UTC m=+1226.700481271" observedRunningTime="2025-10-11 07:14:03.389348335 +0000 UTC m=+1227.163622142" watchObservedRunningTime="2025-10-11 07:14:03.398492883 +0000 UTC m=+1227.172766690" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.700920 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wjshb" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.769203 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data\") pod \"53b11174-ab68-45f3-8933-522549982191\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.769262 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt6q5\" (UniqueName: \"kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5\") pod \"53b11174-ab68-45f3-8933-522549982191\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.769336 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data\") pod \"53b11174-ab68-45f3-8933-522549982191\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.769429 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle\") pod \"53b11174-ab68-45f3-8933-522549982191\" (UID: \"53b11174-ab68-45f3-8933-522549982191\") " Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.774061 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5" (OuterVolumeSpecName: "kube-api-access-nt6q5") pod "53b11174-ab68-45f3-8933-522549982191" (UID: "53b11174-ab68-45f3-8933-522549982191"). InnerVolumeSpecName "kube-api-access-nt6q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.774081 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "53b11174-ab68-45f3-8933-522549982191" (UID: "53b11174-ab68-45f3-8933-522549982191"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.797422 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53b11174-ab68-45f3-8933-522549982191" (UID: "53b11174-ab68-45f3-8933-522549982191"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.812331 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data" (OuterVolumeSpecName: "config-data") pod "53b11174-ab68-45f3-8933-522549982191" (UID: "53b11174-ab68-45f3-8933-522549982191"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.871428 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.871468 5055 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.871479 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt6q5\" (UniqueName: \"kubernetes.io/projected/53b11174-ab68-45f3-8933-522549982191-kube-api-access-nt6q5\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:03 crc kubenswrapper[5055]: I1011 07:14:03.871491 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53b11174-ab68-45f3-8933-522549982191-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.380206 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wjshb" event={"ID":"53b11174-ab68-45f3-8933-522549982191","Type":"ContainerDied","Data":"204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357"} Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.380536 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="204799684e424289a80be307db1a217576378393dcdd766de051f7cbc0607357" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.380261 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wjshb" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.834563 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.835000 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="dnsmasq-dns" containerID="cri-o://6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889" gracePeriod=10 Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.836011 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865333 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:04 crc kubenswrapper[5055]: E1011 07:14:04.865676 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f76f706d-47b9-43df-94f1-80767e21f5c9" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865710 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f76f706d-47b9-43df-94f1-80767e21f5c9" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: E1011 07:14:04.865723 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7902df61-9a6f-4f11-bb2f-95843e9df7a5" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865729 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7902df61-9a6f-4f11-bb2f-95843e9df7a5" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: E1011 07:14:04.865752 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8dcd064-841e-43a0-a210-33ee23f04450" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865759 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8dcd064-841e-43a0-a210-33ee23f04450" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: E1011 07:14:04.865785 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53b11174-ab68-45f3-8933-522549982191" containerName="glance-db-sync" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865791 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="53b11174-ab68-45f3-8933-522549982191" containerName="glance-db-sync" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865937 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8dcd064-841e-43a0-a210-33ee23f04450" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865954 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="53b11174-ab68-45f3-8933-522549982191" containerName="glance-db-sync" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865974 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7902df61-9a6f-4f11-bb2f-95843e9df7a5" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.865985 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f76f706d-47b9-43df-94f1-80767e21f5c9" containerName="mariadb-database-create" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.866897 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.896262 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.988660 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmnm7\" (UniqueName: \"kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.988737 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.988757 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.988815 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.988836 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:04 crc kubenswrapper[5055]: I1011 07:14:04.989078 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.091374 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.091725 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.091805 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.091900 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmnm7\" (UniqueName: \"kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.091970 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.092010 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.092295 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.093206 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.093683 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.093837 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.095008 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.122181 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmnm7\" (UniqueName: 
\"kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7\") pod \"dnsmasq-dns-795846498c-5rms7\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.239523 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.340900 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397253 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397306 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397404 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397476 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397523 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w4t2\" (UniqueName: \"kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.397575 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0\") pod \"6fa844dd-aab4-4d83-99f9-55a21616df97\" (UID: \"6fa844dd-aab4-4d83-99f9-55a21616df97\") " Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.412038 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2" (OuterVolumeSpecName: "kube-api-access-8w4t2") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "kube-api-access-8w4t2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.416678 5055 generic.go:334] "Generic (PLEG): container finished" podID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerID="6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889" exitCode=0 Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.416723 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" event={"ID":"6fa844dd-aab4-4d83-99f9-55a21616df97","Type":"ContainerDied","Data":"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889"} Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.416743 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.416805 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-564965cbfc-s2h6b" event={"ID":"6fa844dd-aab4-4d83-99f9-55a21616df97","Type":"ContainerDied","Data":"3b9a726998e25d5a7716e8a81c14878ec5a6186af2d7436475a00d484790c20c"} Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.416850 5055 scope.go:117] "RemoveContainer" containerID="6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.441408 5055 scope.go:117] "RemoveContainer" containerID="e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.465452 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.468399 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.469590 5055 scope.go:117] "RemoveContainer" containerID="6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889" Oct 11 07:14:05 crc kubenswrapper[5055]: E1011 07:14:05.469993 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889\": container with ID starting with 6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889 not found: ID does not exist" containerID="6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.470043 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889"} err="failed to get container status \"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889\": rpc error: code = NotFound desc = could not find container \"6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889\": container with ID starting with 6dbef68ea506918e7392e4a32ef2f3f7fb0b9b43f49c44079bc8e892471e7889 not found: ID does not exist" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.470061 5055 scope.go:117] "RemoveContainer" containerID="e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae" Oct 11 07:14:05 crc kubenswrapper[5055]: E1011 07:14:05.470433 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae\": container with ID starting with e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae not found: ID does not exist" containerID="e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.470488 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae"} err="failed to get container status \"e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae\": rpc error: code = NotFound desc = could not find container \"e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae\": container with ID starting with e16682232f8bec558ae094fb14bcd1b3ee75ce876399588885bb36f3942934ae not found: ID does not exist" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.473503 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.478692 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config" (OuterVolumeSpecName: "config") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.479354 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6fa844dd-aab4-4d83-99f9-55a21616df97" (UID: "6fa844dd-aab4-4d83-99f9-55a21616df97"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499531 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w4t2\" (UniqueName: \"kubernetes.io/projected/6fa844dd-aab4-4d83-99f9-55a21616df97-kube-api-access-8w4t2\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499579 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499591 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499599 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499608 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.499615 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6fa844dd-aab4-4d83-99f9-55a21616df97-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.733538 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.744981 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:14:05 crc kubenswrapper[5055]: I1011 07:14:05.759201 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-564965cbfc-s2h6b"] Oct 11 07:14:06 crc kubenswrapper[5055]: I1011 07:14:06.462291 5055 generic.go:334] "Generic (PLEG): container finished" podID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerID="0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e" exitCode=0 Oct 11 07:14:06 crc kubenswrapper[5055]: I1011 07:14:06.462393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795846498c-5rms7" event={"ID":"7b18ae77-9686-4c7b-a557-8f5375c62fbb","Type":"ContainerDied","Data":"0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e"} Oct 11 07:14:06 crc kubenswrapper[5055]: I1011 07:14:06.462592 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795846498c-5rms7" event={"ID":"7b18ae77-9686-4c7b-a557-8f5375c62fbb","Type":"ContainerStarted","Data":"e920cd22506b8c22667947690e50a7940355299861bc096b3f1349586a9425d5"} Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.006690 5055 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" path="/var/lib/kubelet/pods/6fa844dd-aab4-4d83-99f9-55a21616df97/volumes" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.470869 5055 generic.go:334] "Generic (PLEG): container finished" podID="4dcbfb53-b641-4aa7-91e3-43225acb15f4" containerID="2bbd19f08b9f56aadd0332921f0d27aafe6bbf0e9122095351a0b2e167b99652" exitCode=0 Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.470976 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gwmkb" event={"ID":"4dcbfb53-b641-4aa7-91e3-43225acb15f4","Type":"ContainerDied","Data":"2bbd19f08b9f56aadd0332921f0d27aafe6bbf0e9122095351a0b2e167b99652"} Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.476759 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795846498c-5rms7" event={"ID":"7b18ae77-9686-4c7b-a557-8f5375c62fbb","Type":"ContainerStarted","Data":"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06"} Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.477750 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.485269 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1232-account-create-xtcnm"] Oct 11 07:14:07 crc kubenswrapper[5055]: E1011 07:14:07.487256 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="dnsmasq-dns" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.487371 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="dnsmasq-dns" Oct 11 07:14:07 crc kubenswrapper[5055]: E1011 07:14:07.487543 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="init" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.487593 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="init" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.487844 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa844dd-aab4-4d83-99f9-55a21616df97" containerName="dnsmasq-dns" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.488952 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.491486 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.499751 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1232-account-create-xtcnm"] Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.545167 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-795846498c-5rms7" podStartSLOduration=3.545148304 podStartE2EDuration="3.545148304s" podCreationTimestamp="2025-10-11 07:14:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:07.542796247 +0000 UTC m=+1231.317070084" watchObservedRunningTime="2025-10-11 07:14:07.545148304 +0000 UTC m=+1231.319422111" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.639939 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8ftp\" (UniqueName: \"kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp\") pod \"barbican-1232-account-create-xtcnm\" (UID: \"14f4e1eb-4198-4c36-a920-8161b80d1f9a\") " pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.741928 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8ftp\" (UniqueName: \"kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp\") pod \"barbican-1232-account-create-xtcnm\" (UID: \"14f4e1eb-4198-4c36-a920-8161b80d1f9a\") " pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.761141 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8ftp\" (UniqueName: \"kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp\") pod \"barbican-1232-account-create-xtcnm\" (UID: \"14f4e1eb-4198-4c36-a920-8161b80d1f9a\") " pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:07 crc kubenswrapper[5055]: I1011 07:14:07.816590 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.259079 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1232-account-create-xtcnm"] Oct 11 07:14:08 crc kubenswrapper[5055]: W1011 07:14:08.261608 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14f4e1eb_4198_4c36_a920_8161b80d1f9a.slice/crio-dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb WatchSource:0}: Error finding container dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb: Status 404 returned error can't find the container with id dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.485934 5055 generic.go:334] "Generic (PLEG): container finished" podID="14f4e1eb-4198-4c36-a920-8161b80d1f9a" containerID="1dd6a3d80213a8d25175233d04091817c1abddfa820fae04faffb4f6ff9eb010" exitCode=0 Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.486028 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1232-account-create-xtcnm" event={"ID":"14f4e1eb-4198-4c36-a920-8161b80d1f9a","Type":"ContainerDied","Data":"1dd6a3d80213a8d25175233d04091817c1abddfa820fae04faffb4f6ff9eb010"} Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.486307 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1232-account-create-xtcnm" event={"ID":"14f4e1eb-4198-4c36-a920-8161b80d1f9a","Type":"ContainerStarted","Data":"dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb"} Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.724675 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.863556 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle\") pod \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.863689 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data\") pod \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.863821 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phgg9\" (UniqueName: \"kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9\") pod \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\" (UID: \"4dcbfb53-b641-4aa7-91e3-43225acb15f4\") " Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.869596 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9" (OuterVolumeSpecName: "kube-api-access-phgg9") pod "4dcbfb53-b641-4aa7-91e3-43225acb15f4" (UID: "4dcbfb53-b641-4aa7-91e3-43225acb15f4"). InnerVolumeSpecName "kube-api-access-phgg9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.890109 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4dcbfb53-b641-4aa7-91e3-43225acb15f4" (UID: "4dcbfb53-b641-4aa7-91e3-43225acb15f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.912226 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data" (OuterVolumeSpecName: "config-data") pod "4dcbfb53-b641-4aa7-91e3-43225acb15f4" (UID: "4dcbfb53-b641-4aa7-91e3-43225acb15f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.966448 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.966737 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4dcbfb53-b641-4aa7-91e3-43225acb15f4-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:08 crc kubenswrapper[5055]: I1011 07:14:08.966984 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phgg9\" (UniqueName: \"kubernetes.io/projected/4dcbfb53-b641-4aa7-91e3-43225acb15f4-kube-api-access-phgg9\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.497527 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gwmkb" event={"ID":"4dcbfb53-b641-4aa7-91e3-43225acb15f4","Type":"ContainerDied","Data":"3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26"} Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.499513 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3965d82b2867f9992cac03615a9dc84a275f73bab7821165ce35253318cf2e26" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.497623 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gwmkb" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.785734 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.793689 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bgwxw"] Oct 11 07:14:09 crc kubenswrapper[5055]: E1011 07:14:09.794266 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dcbfb53-b641-4aa7-91e3-43225acb15f4" containerName="keystone-db-sync" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.794277 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dcbfb53-b641-4aa7-91e3-43225acb15f4" containerName="keystone-db-sync" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.794455 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dcbfb53-b641-4aa7-91e3-43225acb15f4" containerName="keystone-db-sync" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.796609 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.800682 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.800864 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.800867 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v2rpk" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.801106 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.806995 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bgwxw"] Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.852399 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.861747 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.867865 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.887303 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888249 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888293 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjg4f\" (UniqueName: \"kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888364 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888418 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888463 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " 
pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.888485 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.989367 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8ftp\" (UniqueName: \"kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp\") pod \"14f4e1eb-4198-4c36-a920-8161b80d1f9a\" (UID: \"14f4e1eb-4198-4c36-a920-8161b80d1f9a\") " Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.989837 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjg4f\" (UniqueName: \"kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.989911 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.989941 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4xr2\" (UniqueName: \"kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.989963 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990022 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990062 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990108 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 
07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990155 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990181 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990229 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990292 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.990347 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.994471 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.998573 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:09 crc kubenswrapper[5055]: I1011 07:14:09.998735 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp" (OuterVolumeSpecName: "kube-api-access-f8ftp") pod "14f4e1eb-4198-4c36-a920-8161b80d1f9a" (UID: "14f4e1eb-4198-4c36-a920-8161b80d1f9a"). InnerVolumeSpecName "kube-api-access-f8ftp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.006123 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.006209 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.017855 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.050818 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjg4f\" (UniqueName: \"kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f\") pod \"keystone-bootstrap-bgwxw\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.060876 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:10 crc kubenswrapper[5055]: E1011 07:14:10.061961 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14f4e1eb-4198-4c36-a920-8161b80d1f9a" containerName="mariadb-account-create" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.061983 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="14f4e1eb-4198-4c36-a920-8161b80d1f9a" containerName="mariadb-account-create" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.062525 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="14f4e1eb-4198-4c36-a920-8161b80d1f9a" containerName="mariadb-account-create" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092238 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092387 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092477 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092498 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-q4xr2\" (UniqueName: \"kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092541 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092648 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.092809 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8ftp\" (UniqueName: \"kubernetes.io/projected/14f4e1eb-4198-4c36-a920-8161b80d1f9a-kube-api-access-f8ftp\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.093726 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.096172 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.098249 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.099109 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.099834 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.100002 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.100189 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.121835 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.122068 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.128930 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4xr2\" (UniqueName: \"kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2\") pod \"dnsmasq-dns-6b4bfdd7f7-5dtg5\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.186694 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.187317 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194671 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194741 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194803 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194849 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194881 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194906 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pwv2\" (UniqueName: \"kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.194936 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.215230 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.219822 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.221160 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.249035 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.256951 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-6lhht"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.258998 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.260670 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.260944 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-6jp7q" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.260995 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.266153 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-6lhht"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.297835 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.297889 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pwv2\" (UniqueName: \"kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.297923 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.297939 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298011 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298050 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298070 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jxnh\" (UniqueName: \"kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298092 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298108 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298141 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298164 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298189 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298324 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.298676 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " 
pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.301333 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.303825 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.313735 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.320042 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.320672 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.324085 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pwv2\" (UniqueName: \"kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2\") pod \"ceilometer-0\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") " pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402585 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402622 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402665 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" 
(UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402696 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jxnh\" (UniqueName: \"kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402711 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402730 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402747 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99gl7\" (UniqueName: \"kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402778 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402803 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.402819 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.403509 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.404160 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 
07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.405473 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.406217 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.406263 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.481656 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jxnh\" (UniqueName: \"kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh\") pod \"dnsmasq-dns-5dc68bd5-9z7rv\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.483153 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.509249 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.509385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.509444 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.509467 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.509496 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99gl7\" (UniqueName: \"kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " 
pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.511203 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.515168 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.515624 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.516172 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.520336 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-795846498c-5rms7" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="dnsmasq-dns" containerID="cri-o://2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06" gracePeriod=10 Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.520444 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1232-account-create-xtcnm" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.521906 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1232-account-create-xtcnm" event={"ID":"14f4e1eb-4198-4c36-a920-8161b80d1f9a","Type":"ContainerDied","Data":"dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb"} Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.521951 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc586b6a5396f8875e71c62af7a924bbfe5e3a00a9449d6d8781e1198e8039eb" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.528235 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99gl7\" (UniqueName: \"kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7\") pod \"placement-db-sync-6lhht\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.546664 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.588423 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.854565 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bgwxw"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.915143 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.918965 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.923017 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.923240 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.923466 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.923572 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-msbtk" Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.956217 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:10 crc kubenswrapper[5055]: I1011 07:14:10.977075 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.018911 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.018997 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019203 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019247 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxpc7\" (UniqueName: \"kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019270 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run\") pod \"glance-default-external-api-0\" (UID: 
\"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019291 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019321 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.019342 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.032161 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.034195 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.038964 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.042797 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.046448 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.050241 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:11 crc kubenswrapper[5055]: W1011 07:14:11.093312 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3908605d_c4b6_4df0_84a7_1137a9928b09.slice/crio-9dc29d93eff33329f28fa4fe8fa49ca84374f675cb9b53bc7557fb53ba019d8a WatchSource:0}: Error finding container 9dc29d93eff33329f28fa4fe8fa49ca84374f675cb9b53bc7557fb53ba019d8a: Status 404 returned error can't find the container with id 9dc29d93eff33329f28fa4fe8fa49ca84374f675cb9b53bc7557fb53ba019d8a Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121246 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrmfj\" (UniqueName: \"kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121287 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs\") pod \"glance-default-external-api-0\" (UID: 
\"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121321 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121359 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121405 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121422 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121446 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121475 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121522 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121553 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121627 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121657 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121694 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121721 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121750 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxpc7\" (UniqueName: \"kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.121807 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.122247 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.123124 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.127142 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " 
pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.127713 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.128150 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.129343 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.129468 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.139164 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxpc7\" (UniqueName: \"kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.139710 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.178382 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.222643 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.223612 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmnm7\" (UniqueName: \"kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.223671 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.223735 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.223851 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.223904 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb\") pod \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\" (UID: \"7b18ae77-9686-4c7b-a557-8f5375c62fbb\") " Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224148 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrmfj\" (UniqueName: \"kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224188 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224262 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224282 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224340 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224371 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.224657 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " 
pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.226339 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.227406 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.227660 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.227830 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.231936 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.231967 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.233135 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.233419 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7" (OuterVolumeSpecName: "kube-api-access-tmnm7") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). InnerVolumeSpecName "kube-api-access-tmnm7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.255671 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.259596 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrmfj\" (UniqueName: \"kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.265125 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-6lhht"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.272073 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: W1011 07:14:11.276267 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11f3f545_5226_44e9_956b_1c79012e5a74.slice/crio-45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39 WatchSource:0}: Error finding container 45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39: Status 404 returned error can't find the container with id 45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39 Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.306260 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.311486 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config" (OuterVolumeSpecName: "config") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.312585 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.315505 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.321171 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7b18ae77-9686-4c7b-a557-8f5375c62fbb" (UID: "7b18ae77-9686-4c7b-a557-8f5375c62fbb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328027 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328067 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328079 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328088 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328097 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmnm7\" (UniqueName: \"kubernetes.io/projected/7b18ae77-9686-4c7b-a557-8f5375c62fbb-kube-api-access-tmnm7\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.328108 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b18ae77-9686-4c7b-a557-8f5375c62fbb-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.388073 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.419042 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.543077 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bgwxw" event={"ID":"84c264e3-e231-408d-8110-bb7163f5cffd","Type":"ContainerStarted","Data":"d09c619816ad3608f0b8742bcf9e0a74adc342bdff3f2bda266f3f8ea0d63c19"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.543131 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bgwxw" event={"ID":"84c264e3-e231-408d-8110-bb7163f5cffd","Type":"ContainerStarted","Data":"b494c67f01c5452a7e477059f838af60923fae876bf64539611521b5bf8d109b"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.553258 5055 generic.go:334] "Generic (PLEG): container finished" podID="6d33d3de-28bb-42fe-b7fb-384e64c2167d" containerID="db8a979ca7f87186f35f6e2c32d9c50cf89a567996449e9cfa07c1b1ef700390" exitCode=0 Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.553465 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" event={"ID":"6d33d3de-28bb-42fe-b7fb-384e64c2167d","Type":"ContainerDied","Data":"db8a979ca7f87186f35f6e2c32d9c50cf89a567996449e9cfa07c1b1ef700390"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.553490 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" event={"ID":"6d33d3de-28bb-42fe-b7fb-384e64c2167d","Type":"ContainerStarted","Data":"1431f3e888cf7fbc2bd337ef661b26a84479f5bd7032938ff76451fb55076058"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.581075 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6lhht" event={"ID":"11f3f545-5226-44e9-956b-1c79012e5a74","Type":"ContainerStarted","Data":"45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.596107 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bgwxw" podStartSLOduration=2.5960910459999997 podStartE2EDuration="2.596091046s" podCreationTimestamp="2025-10-11 07:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:11.572212581 +0000 UTC m=+1235.346486388" watchObservedRunningTime="2025-10-11 07:14:11.596091046 +0000 UTC m=+1235.370364853" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.611932 5055 generic.go:334] "Generic (PLEG): container finished" podID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerID="2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06" exitCode=0 Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.612041 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795846498c-5rms7" event={"ID":"7b18ae77-9686-4c7b-a557-8f5375c62fbb","Type":"ContainerDied","Data":"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.612260 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795846498c-5rms7" event={"ID":"7b18ae77-9686-4c7b-a557-8f5375c62fbb","Type":"ContainerDied","Data":"e920cd22506b8c22667947690e50a7940355299861bc096b3f1349586a9425d5"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.612278 5055 scope.go:117] "RemoveContainer" containerID="2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06" Oct 11 07:14:11 crc 
kubenswrapper[5055]: I1011 07:14:11.612065 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795846498c-5rms7" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.618029 5055 generic.go:334] "Generic (PLEG): container finished" podID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerID="5e5268369fdabdd2617d30772fabc77e48b8e70f7fdfb630347d834889fb431d" exitCode=0 Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.618110 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" event={"ID":"02b7eb87-002d-454c-bc42-efe7ffdd18e3","Type":"ContainerDied","Data":"5e5268369fdabdd2617d30772fabc77e48b8e70f7fdfb630347d834889fb431d"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.618139 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" event={"ID":"02b7eb87-002d-454c-bc42-efe7ffdd18e3","Type":"ContainerStarted","Data":"c1e172df710a98505e8e29e5a20bcac5e7f0f26232439c8ccc35c17d7027e9eb"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.622541 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerStarted","Data":"9dc29d93eff33329f28fa4fe8fa49ca84374f675cb9b53bc7557fb53ba019d8a"} Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.640602 5055 scope.go:117] "RemoveContainer" containerID="0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.669647 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.678051 5055 scope.go:117] "RemoveContainer" containerID="2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06" Oct 11 07:14:11 crc kubenswrapper[5055]: E1011 07:14:11.678755 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06\": container with ID starting with 2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06 not found: ID does not exist" containerID="2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.678800 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06"} err="failed to get container status \"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06\": rpc error: code = NotFound desc = could not find container \"2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06\": container with ID starting with 2da37814ec97e153799bfb62e2e6038db861eb49cc92a041c3dbfa5208350c06 not found: ID does not exist" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.678825 5055 scope.go:117] "RemoveContainer" containerID="0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.687605 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-795846498c-5rms7"] Oct 11 07:14:11 crc kubenswrapper[5055]: E1011 07:14:11.688836 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e\": container with ID 
starting with 0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e not found: ID does not exist" containerID="0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.689967 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e"} err="failed to get container status \"0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e\": rpc error: code = NotFound desc = could not find container \"0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e\": container with ID starting with 0db0dd873de18d856fdda4c1474ed17a4dc798d3b87f3c3db5d2e5e449c6a80e not found: ID does not exist" Oct 11 07:14:11 crc kubenswrapper[5055]: I1011 07:14:11.966056 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:11 crc kubenswrapper[5055]: W1011 07:14:11.972901 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda22f9eb8_ac6b_48dc_bfa6_51532de9962a.slice/crio-6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9 WatchSource:0}: Error finding container 6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9: Status 404 returned error can't find the container with id 6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9 Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.277287 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.302819 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368120 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4xr2\" (UniqueName: \"kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2\") pod \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368171 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb\") pod \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368202 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb\") pod \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368219 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc\") pod \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368277 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0\") pod 
\"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.368299 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config\") pod \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\" (UID: \"6d33d3de-28bb-42fe-b7fb-384e64c2167d\") " Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.379147 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2" (OuterVolumeSpecName: "kube-api-access-q4xr2") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "kube-api-access-q4xr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.393194 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.399151 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.399838 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.409135 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.412173 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config" (OuterVolumeSpecName: "config") pod "6d33d3de-28bb-42fe-b7fb-384e64c2167d" (UID: "6d33d3de-28bb-42fe-b7fb-384e64c2167d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.478933 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4xr2\" (UniqueName: \"kubernetes.io/projected/6d33d3de-28bb-42fe-b7fb-384e64c2167d-kube-api-access-q4xr2\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.478972 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.478981 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.478990 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.478999 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.479008 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d33d3de-28bb-42fe-b7fb-384e64c2167d-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.673529 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" event={"ID":"6d33d3de-28bb-42fe-b7fb-384e64c2167d","Type":"ContainerDied","Data":"1431f3e888cf7fbc2bd337ef661b26a84479f5bd7032938ff76451fb55076058"} Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.673579 5055 scope.go:117] "RemoveContainer" containerID="db8a979ca7f87186f35f6e2c32d9c50cf89a567996449e9cfa07c1b1ef700390" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.673671 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.681001 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerStarted","Data":"6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9"} Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.693996 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" event={"ID":"02b7eb87-002d-454c-bc42-efe7ffdd18e3","Type":"ContainerStarted","Data":"5049badca7435a38a8ff7e28892d2b03b19c39cd0a2e43bf3dd847a448880928"} Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.694156 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.699824 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerStarted","Data":"5886b4a4ed0fcc60796a7691cd0fb0d4476ca064961a0ca4a7a249ee3096fb51"} Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.725677 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.737423 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" podStartSLOduration=2.737403672 podStartE2EDuration="2.737403672s" podCreationTimestamp="2025-10-11 07:14:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:12.726249477 +0000 UTC m=+1236.500523294" watchObservedRunningTime="2025-10-11 07:14:12.737403672 +0000 UTC m=+1236.511677479" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.845561 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.897845 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b4bfdd7f7-5dtg5"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.929192 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.935596 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.941246 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-bk7mn"] Oct 11 07:14:12 crc kubenswrapper[5055]: E1011 07:14:12.941729 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="init" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.941756 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="init" Oct 11 07:14:12 crc kubenswrapper[5055]: E1011 07:14:12.941812 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d33d3de-28bb-42fe-b7fb-384e64c2167d" containerName="init" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.941821 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d33d3de-28bb-42fe-b7fb-384e64c2167d" containerName="init" Oct 11 07:14:12 crc kubenswrapper[5055]: E1011 07:14:12.941834 5055 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="dnsmasq-dns" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.941841 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="dnsmasq-dns" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.942060 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" containerName="dnsmasq-dns" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.942080 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d33d3de-28bb-42fe-b7fb-384e64c2167d" containerName="init" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.946636 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.947199 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bk7mn"] Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.955223 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 11 07:14:12 crc kubenswrapper[5055]: I1011 07:14:12.956429 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-vf7lq" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.000288 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.000366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp8ht\" (UniqueName: \"kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.000447 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.037755 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d33d3de-28bb-42fe-b7fb-384e64c2167d" path="/var/lib/kubelet/pods/6d33d3de-28bb-42fe-b7fb-384e64c2167d/volumes" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.038567 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b18ae77-9686-4c7b-a557-8f5375c62fbb" path="/var/lib/kubelet/pods/7b18ae77-9686-4c7b-a557-8f5375c62fbb/volumes" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.101720 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp8ht\" (UniqueName: \"kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 
07:14:13.102229 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.102280 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.118624 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.119195 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.127415 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp8ht\" (UniqueName: \"kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht\") pod \"barbican-db-sync-bk7mn\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.288642 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.719069 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerStarted","Data":"bd47c1e5c762bf3369cf0794a7bd76c76e644f7f839b5ceb228042a88aed775e"} Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.721018 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerStarted","Data":"9fda0b008ef01da1d1e9eedbff2afdb1728f39de63e0959b6607ad0d7a2ff09c"} Oct 11 07:14:13 crc kubenswrapper[5055]: I1011 07:14:13.792828 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bk7mn"] Oct 11 07:14:13 crc kubenswrapper[5055]: W1011 07:14:13.804648 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c34e566_ca8a_4c34_a24e_5401cf63666b.slice/crio-5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2 WatchSource:0}: Error finding container 5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2: Status 404 returned error can't find the container with id 5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2 Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.735808 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerStarted","Data":"d5826852f5a7ec193e383ce9c7b40ef54e48636bd04ea02fbdc3e35d19b254ef"} Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.735964 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-log" containerID="cri-o://bd47c1e5c762bf3369cf0794a7bd76c76e644f7f839b5ceb228042a88aed775e" gracePeriod=30 Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.736199 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-httpd" containerID="cri-o://d5826852f5a7ec193e383ce9c7b40ef54e48636bd04ea02fbdc3e35d19b254ef" gracePeriod=30 Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.741001 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-log" containerID="cri-o://9fda0b008ef01da1d1e9eedbff2afdb1728f39de63e0959b6607ad0d7a2ff09c" gracePeriod=30 Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.741039 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerStarted","Data":"dd07aa6984f4c16260939d47a3882ab926cde1f39239a5c4bdc12d1ce0affbbf"} Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.741145 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-httpd" containerID="cri-o://dd07aa6984f4c16260939d47a3882ab926cde1f39239a5c4bdc12d1ce0affbbf" gracePeriod=30 Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.743557 5055 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bk7mn" event={"ID":"4c34e566-ca8a-4c34-a24e-5401cf63666b","Type":"ContainerStarted","Data":"5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2"} Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.797892 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.797871498 podStartE2EDuration="5.797871498s" podCreationTimestamp="2025-10-11 07:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:14.769704511 +0000 UTC m=+1238.543978318" watchObservedRunningTime="2025-10-11 07:14:14.797871498 +0000 UTC m=+1238.572145305" Oct 11 07:14:14 crc kubenswrapper[5055]: I1011 07:14:14.799751 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.799745121 podStartE2EDuration="5.799745121s" podCreationTimestamp="2025-10-11 07:14:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:14.794583845 +0000 UTC m=+1238.568857652" watchObservedRunningTime="2025-10-11 07:14:14.799745121 +0000 UTC m=+1238.574018918" Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.751819 5055 generic.go:334] "Generic (PLEG): container finished" podID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerID="dd07aa6984f4c16260939d47a3882ab926cde1f39239a5c4bdc12d1ce0affbbf" exitCode=0 Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.751850 5055 generic.go:334] "Generic (PLEG): container finished" podID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerID="9fda0b008ef01da1d1e9eedbff2afdb1728f39de63e0959b6607ad0d7a2ff09c" exitCode=143 Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.751900 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerDied","Data":"dd07aa6984f4c16260939d47a3882ab926cde1f39239a5c4bdc12d1ce0affbbf"} Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.751939 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerDied","Data":"9fda0b008ef01da1d1e9eedbff2afdb1728f39de63e0959b6607ad0d7a2ff09c"} Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.754551 5055 generic.go:334] "Generic (PLEG): container finished" podID="84c264e3-e231-408d-8110-bb7163f5cffd" containerID="d09c619816ad3608f0b8742bcf9e0a74adc342bdff3f2bda266f3f8ea0d63c19" exitCode=0 Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.754599 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bgwxw" event={"ID":"84c264e3-e231-408d-8110-bb7163f5cffd","Type":"ContainerDied","Data":"d09c619816ad3608f0b8742bcf9e0a74adc342bdff3f2bda266f3f8ea0d63c19"} Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.757267 5055 generic.go:334] "Generic (PLEG): container finished" podID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerID="d5826852f5a7ec193e383ce9c7b40ef54e48636bd04ea02fbdc3e35d19b254ef" exitCode=0 Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.757371 5055 generic.go:334] "Generic (PLEG): container finished" podID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" 
containerID="bd47c1e5c762bf3369cf0794a7bd76c76e644f7f839b5ceb228042a88aed775e" exitCode=143 Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.757313 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerDied","Data":"d5826852f5a7ec193e383ce9c7b40ef54e48636bd04ea02fbdc3e35d19b254ef"} Oct 11 07:14:15 crc kubenswrapper[5055]: I1011 07:14:15.757454 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerDied","Data":"bd47c1e5c762bf3369cf0794a7bd76c76e644f7f839b5ceb228042a88aed775e"} Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.420606 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-0232-account-create-rnfzp"] Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.422353 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.424113 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.438101 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0232-account-create-rnfzp"] Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.493864 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r7xq\" (UniqueName: \"kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq\") pod \"cinder-0232-account-create-rnfzp\" (UID: \"6cd2b520-4d17-4d96-a0d1-a6bdf242f973\") " pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.596204 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r7xq\" (UniqueName: \"kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq\") pod \"cinder-0232-account-create-rnfzp\" (UID: \"6cd2b520-4d17-4d96-a0d1-a6bdf242f973\") " pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.618972 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r7xq\" (UniqueName: \"kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq\") pod \"cinder-0232-account-create-rnfzp\" (UID: \"6cd2b520-4d17-4d96-a0d1-a6bdf242f973\") " pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.620011 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b491-account-create-rmjgx"] Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.621340 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.623251 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.633606 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b491-account-create-rmjgx"] Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.698016 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb49q\" (UniqueName: \"kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q\") pod \"neutron-b491-account-create-rmjgx\" (UID: \"cb84efeb-f131-48fc-b283-c6cb0db40cbe\") " pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.747654 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.783628 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bgwxw" event={"ID":"84c264e3-e231-408d-8110-bb7163f5cffd","Type":"ContainerDied","Data":"b494c67f01c5452a7e477059f838af60923fae876bf64539611521b5bf8d109b"} Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.783674 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b494c67f01c5452a7e477059f838af60923fae876bf64539611521b5bf8d109b" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.792803 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a22f9eb8-ac6b-48dc-bfa6-51532de9962a","Type":"ContainerDied","Data":"6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9"} Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.792847 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6649bb9bf5ed6b9b849625f89cde38ca60d1612eb9222f7dd267023a8cad2ce9" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.807652 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb49q\" (UniqueName: \"kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q\") pod \"neutron-b491-account-create-rmjgx\" (UID: \"cb84efeb-f131-48fc-b283-c6cb0db40cbe\") " pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.826045 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb49q\" (UniqueName: \"kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q\") pod \"neutron-b491-account-create-rmjgx\" (UID: \"cb84efeb-f131-48fc-b283-c6cb0db40cbe\") " pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.834620 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.861055 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.908982 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.909053 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.909090 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.909195 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjg4f\" (UniqueName: \"kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.909267 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.909316 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts\") pod \"84c264e3-e231-408d-8110-bb7163f5cffd\" (UID: \"84c264e3-e231-408d-8110-bb7163f5cffd\") " Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.922404 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.922698 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts" (OuterVolumeSpecName: "scripts") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.923687 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.925970 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f" (OuterVolumeSpecName: "kube-api-access-zjg4f") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "kube-api-access-zjg4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.969008 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data" (OuterVolumeSpecName: "config-data") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.970819 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:17 crc kubenswrapper[5055]: I1011 07:14:17.971990 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84c264e3-e231-408d-8110-bb7163f5cffd" (UID: "84c264e3-e231-408d-8110-bb7163f5cffd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.010901 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.010968 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011000 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011045 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxpc7\" (UniqueName: \"kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011065 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011098 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts\") pod 
\"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011138 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011190 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle\") pod \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\" (UID: \"a22f9eb8-ac6b-48dc-bfa6-51532de9962a\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011559 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011572 5055 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011583 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011596 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011607 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjg4f\" (UniqueName: \"kubernetes.io/projected/84c264e3-e231-408d-8110-bb7163f5cffd-kube-api-access-zjg4f\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.011615 5055 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/84c264e3-e231-408d-8110-bb7163f5cffd-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.012892 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.013072 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs" (OuterVolumeSpecName: "logs") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.016075 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts" (OuterVolumeSpecName: "scripts") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.016320 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.016903 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7" (OuterVolumeSpecName: "kube-api-access-xxpc7") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "kube-api-access-xxpc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.069130 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.091149 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data" (OuterVolumeSpecName: "config-data") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.105447 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a22f9eb8-ac6b-48dc-bfa6-51532de9962a" (UID: "a22f9eb8-ac6b-48dc-bfa6-51532de9962a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116424 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116468 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116482 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116498 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxpc7\" (UniqueName: \"kubernetes.io/projected/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-kube-api-access-xxpc7\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116511 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116523 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116535 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.116545 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a22f9eb8-ac6b-48dc-bfa6-51532de9962a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.147268 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.187585 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.218009 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.317197 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0232-account-create-rnfzp"] Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.318670 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.318730 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.318807 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.318925 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrmfj\" (UniqueName: \"kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.319022 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.319050 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.319076 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.319124 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts\") pod \"0261d435-deca-4eeb-adc5-04ae46321ec8\" (UID: \"0261d435-deca-4eeb-adc5-04ae46321ec8\") " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.319822 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: 
"0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.321599 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs" (OuterVolumeSpecName: "logs") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.325742 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts" (OuterVolumeSpecName: "scripts") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.326486 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.327178 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj" (OuterVolumeSpecName: "kube-api-access-nrmfj") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "kube-api-access-nrmfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: W1011 07:14:18.327451 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cd2b520_4d17_4d96_a0d1_a6bdf242f973.slice/crio-97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025 WatchSource:0}: Error finding container 97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025: Status 404 returned error can't find the container with id 97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025 Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.361221 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.371721 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.382819 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data" (OuterVolumeSpecName: "config-data") pod "0261d435-deca-4eeb-adc5-04ae46321ec8" (UID: "0261d435-deca-4eeb-adc5-04ae46321ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420524 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420557 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0261d435-deca-4eeb-adc5-04ae46321ec8-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420586 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420597 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420605 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420613 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420621 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0261d435-deca-4eeb-adc5-04ae46321ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.420629 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrmfj\" (UniqueName: \"kubernetes.io/projected/0261d435-deca-4eeb-adc5-04ae46321ec8-kube-api-access-nrmfj\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.437237 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.488133 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b491-account-create-rmjgx"] Oct 11 07:14:18 crc kubenswrapper[5055]: W1011 07:14:18.505665 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb84efeb_f131_48fc_b283_c6cb0db40cbe.slice/crio-9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb WatchSource:0}: Error finding container 9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb: Status 404 returned error can't find the container with id 9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb Oct 11 07:14:18 crc 
kubenswrapper[5055]: I1011 07:14:18.522173 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.804076 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerStarted","Data":"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.805670 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6lhht" event={"ID":"11f3f545-5226-44e9-956b-1c79012e5a74","Type":"ContainerStarted","Data":"0d961448058531e90b1f8b9d6580cc50c771d581b5d95b0d8f2314976f162c41"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.807364 5055 generic.go:334] "Generic (PLEG): container finished" podID="6cd2b520-4d17-4d96-a0d1-a6bdf242f973" containerID="bc476925d6ccbab654212a5e1a766d13c073fe400387165deb5be1868fea364f" exitCode=0 Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.807409 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0232-account-create-rnfzp" event={"ID":"6cd2b520-4d17-4d96-a0d1-a6bdf242f973","Type":"ContainerDied","Data":"bc476925d6ccbab654212a5e1a766d13c073fe400387165deb5be1868fea364f"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.807426 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0232-account-create-rnfzp" event={"ID":"6cd2b520-4d17-4d96-a0d1-a6bdf242f973","Type":"ContainerStarted","Data":"97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.809843 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0261d435-deca-4eeb-adc5-04ae46321ec8","Type":"ContainerDied","Data":"5886b4a4ed0fcc60796a7691cd0fb0d4476ca064961a0ca4a7a249ee3096fb51"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.809870 5055 scope.go:117] "RemoveContainer" containerID="dd07aa6984f4c16260939d47a3882ab926cde1f39239a5c4bdc12d1ce0affbbf" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.809953 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.814959 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b491-account-create-rmjgx" event={"ID":"cb84efeb-f131-48fc-b283-c6cb0db40cbe","Type":"ContainerStarted","Data":"003684d134598af05a17bfa081c9fbb69ccc7d05883cb34ca560f38b2dc5eec6"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.814988 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b491-account-create-rmjgx" event={"ID":"cb84efeb-f131-48fc-b283-c6cb0db40cbe","Type":"ContainerStarted","Data":"9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb"} Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.814993 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.815514 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-bgwxw" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.841547 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-6lhht" podStartSLOduration=2.3331314069999998 podStartE2EDuration="8.841525945s" podCreationTimestamp="2025-10-11 07:14:10 +0000 UTC" firstStartedPulling="2025-10-11 07:14:11.278436471 +0000 UTC m=+1235.052710278" lastFinishedPulling="2025-10-11 07:14:17.786831009 +0000 UTC m=+1241.561104816" observedRunningTime="2025-10-11 07:14:18.820364696 +0000 UTC m=+1242.594638523" watchObservedRunningTime="2025-10-11 07:14:18.841525945 +0000 UTC m=+1242.615799762" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.897906 5055 scope.go:117] "RemoveContainer" containerID="9fda0b008ef01da1d1e9eedbff2afdb1728f39de63e0959b6607ad0d7a2ff09c" Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.943487 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.949637 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:18 crc kubenswrapper[5055]: I1011 07:14:18.979024 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bgwxw"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.003556 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: E1011 07:14:19.004201 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004221 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: E1011 07:14:19.004232 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004238 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: E1011 07:14:19.004249 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004256 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: E1011 07:14:19.004274 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004279 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: E1011 07:14:19.004297 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84c264e3-e231-408d-8110-bb7163f5cffd" containerName="keystone-bootstrap" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004304 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="84c264e3-e231-408d-8110-bb7163f5cffd" containerName="keystone-bootstrap" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004465 5055 
memory_manager.go:354] "RemoveStaleState removing state" podUID="84c264e3-e231-408d-8110-bb7163f5cffd" containerName="keystone-bootstrap" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004479 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004491 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004503 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" containerName="glance-httpd" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.004514 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" containerName="glance-log" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.008696 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bgwxw"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.008891 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.012145 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-msbtk" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.012371 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.012606 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.013880 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.036857 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84c264e3-e231-408d-8110-bb7163f5cffd" path="/var/lib/kubelet/pods/84c264e3-e231-408d-8110-bb7163f5cffd/volumes" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.037561 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a22f9eb8-ac6b-48dc-bfa6-51532de9962a" path="/var/lib/kubelet/pods/a22f9eb8-ac6b-48dc-bfa6-51532de9962a/volumes" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.038477 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.040839 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.049872 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.058919 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.060524 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.062413 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.063826 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.069171 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.081789 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-55cv5"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.084519 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.088623 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v2rpk" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.089708 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.089922 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.090213 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.096079 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-55cv5"] Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133517 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phpk5\" (UniqueName: \"kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133567 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133596 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133637 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133657 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133676 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133693 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133761 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133827 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133852 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133868 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133890 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.133943 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2p22\" (UniqueName: \"kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.134041 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.134057 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.134075 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236081 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phpk5\" (UniqueName: \"kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236130 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236204 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236240 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236299 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236326 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236357 5055 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236381 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236423 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236451 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236481 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236503 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236524 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236559 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf8p9\" (UniqueName: \"kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236583 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236614 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236647 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2p22\" (UniqueName: \"kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236680 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236707 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236738 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236758 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.236802 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.237237 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.237296 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.237705 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.238070 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.238074 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.238308 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.243474 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.244410 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.244582 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.245176 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.248452 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.250906 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " 
pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.254937 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.255285 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.264271 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phpk5\" (UniqueName: \"kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.264435 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2p22\" (UniqueName: \"kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.273782 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.304281 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") " pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.324195 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339126 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339253 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339288 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf8p9\" (UniqueName: \"kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339321 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339362 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.339385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.344088 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.344576 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.344590 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.345003 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.347264 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.358387 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf8p9\" (UniqueName: \"kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9\") pod \"keystone-bootstrap-55cv5\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.377916 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.408618 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.844943 5055 generic.go:334] "Generic (PLEG): container finished" podID="cb84efeb-f131-48fc-b283-c6cb0db40cbe" containerID="003684d134598af05a17bfa081c9fbb69ccc7d05883cb34ca560f38b2dc5eec6" exitCode=0 Oct 11 07:14:19 crc kubenswrapper[5055]: I1011 07:14:19.845262 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b491-account-create-rmjgx" event={"ID":"cb84efeb-f131-48fc-b283-c6cb0db40cbe","Type":"ContainerDied","Data":"003684d134598af05a17bfa081c9fbb69ccc7d05883cb34ca560f38b2dc5eec6"} Oct 11 07:14:20 crc kubenswrapper[5055]: I1011 07:14:20.548988 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:14:20 crc kubenswrapper[5055]: I1011 07:14:20.616070 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:14:20 crc kubenswrapper[5055]: I1011 07:14:20.616638 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="dnsmasq-dns" containerID="cri-o://02982624e7fec032027db301139bfe8b2893cafac8ee3fb79205d98e6447ab84" gracePeriod=10 Oct 11 07:14:20 crc kubenswrapper[5055]: I1011 07:14:20.873850 5055 generic.go:334] "Generic (PLEG): container finished" podID="11f3f545-5226-44e9-956b-1c79012e5a74" containerID="0d961448058531e90b1f8b9d6580cc50c771d581b5d95b0d8f2314976f162c41" exitCode=0 Oct 11 07:14:20 crc kubenswrapper[5055]: I1011 07:14:20.873904 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6lhht" event={"ID":"11f3f545-5226-44e9-956b-1c79012e5a74","Type":"ContainerDied","Data":"0d961448058531e90b1f8b9d6580cc50c771d581b5d95b0d8f2314976f162c41"} Oct 11 07:14:21 crc kubenswrapper[5055]: I1011 07:14:21.002271 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0261d435-deca-4eeb-adc5-04ae46321ec8" path="/var/lib/kubelet/pods/0261d435-deca-4eeb-adc5-04ae46321ec8/volumes" Oct 11 07:14:21 crc kubenswrapper[5055]: I1011 07:14:21.132966 5055 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Oct 11 07:14:21 crc kubenswrapper[5055]: I1011 07:14:21.886662 5055 generic.go:334] "Generic (PLEG): container finished" podID="ab2cd371-182f-4750-9439-b80dea4a9310" containerID="02982624e7fec032027db301139bfe8b2893cafac8ee3fb79205d98e6447ab84" exitCode=0 Oct 11 07:14:21 crc kubenswrapper[5055]: I1011 07:14:21.886739 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" event={"ID":"ab2cd371-182f-4750-9439-b80dea4a9310","Type":"ContainerDied","Data":"02982624e7fec032027db301139bfe8b2893cafac8ee3fb79205d98e6447ab84"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.482006 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.516041 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.523704 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.543633 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622695 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb\") pod \"ab2cd371-182f-4750-9439-b80dea4a9310\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622755 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle\") pod \"11f3f545-5226-44e9-956b-1c79012e5a74\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622828 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config\") pod \"ab2cd371-182f-4750-9439-b80dea4a9310\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622890 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc\") pod \"ab2cd371-182f-4750-9439-b80dea4a9310\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622921 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzm74\" (UniqueName: \"kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74\") pod \"ab2cd371-182f-4750-9439-b80dea4a9310\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.622973 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs\") pod \"11f3f545-5226-44e9-956b-1c79012e5a74\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623030 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data\") pod \"11f3f545-5226-44e9-956b-1c79012e5a74\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623049 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99gl7\" (UniqueName: \"kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7\") pod \"11f3f545-5226-44e9-956b-1c79012e5a74\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623136 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb\") pod \"ab2cd371-182f-4750-9439-b80dea4a9310\" (UID: \"ab2cd371-182f-4750-9439-b80dea4a9310\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623189 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts\") pod \"11f3f545-5226-44e9-956b-1c79012e5a74\" (UID: \"11f3f545-5226-44e9-956b-1c79012e5a74\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623213 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2r7xq\" (UniqueName: \"kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq\") pod \"6cd2b520-4d17-4d96-a0d1-a6bdf242f973\" (UID: \"6cd2b520-4d17-4d96-a0d1-a6bdf242f973\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.623270 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb49q\" (UniqueName: \"kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q\") pod \"cb84efeb-f131-48fc-b283-c6cb0db40cbe\" (UID: \"cb84efeb-f131-48fc-b283-c6cb0db40cbe\") " Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.624270 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs" (OuterVolumeSpecName: "logs") pod "11f3f545-5226-44e9-956b-1c79012e5a74" (UID: "11f3f545-5226-44e9-956b-1c79012e5a74"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.628397 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q" (OuterVolumeSpecName: "kube-api-access-cb49q") pod "cb84efeb-f131-48fc-b283-c6cb0db40cbe" (UID: "cb84efeb-f131-48fc-b283-c6cb0db40cbe"). InnerVolumeSpecName "kube-api-access-cb49q". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.628724 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7" (OuterVolumeSpecName: "kube-api-access-99gl7") pod "11f3f545-5226-44e9-956b-1c79012e5a74" (UID: "11f3f545-5226-44e9-956b-1c79012e5a74"). 
InnerVolumeSpecName "kube-api-access-99gl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.629141 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts" (OuterVolumeSpecName: "scripts") pod "11f3f545-5226-44e9-956b-1c79012e5a74" (UID: "11f3f545-5226-44e9-956b-1c79012e5a74"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.634986 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq" (OuterVolumeSpecName: "kube-api-access-2r7xq") pod "6cd2b520-4d17-4d96-a0d1-a6bdf242f973" (UID: "6cd2b520-4d17-4d96-a0d1-a6bdf242f973"). InnerVolumeSpecName "kube-api-access-2r7xq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.641703 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74" (OuterVolumeSpecName: "kube-api-access-jzm74") pod "ab2cd371-182f-4750-9439-b80dea4a9310" (UID: "ab2cd371-182f-4750-9439-b80dea4a9310"). InnerVolumeSpecName "kube-api-access-jzm74". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.663906 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data" (OuterVolumeSpecName: "config-data") pod "11f3f545-5226-44e9-956b-1c79012e5a74" (UID: "11f3f545-5226-44e9-956b-1c79012e5a74"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.664260 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "11f3f545-5226-44e9-956b-1c79012e5a74" (UID: "11f3f545-5226-44e9-956b-1c79012e5a74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.690038 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ab2cd371-182f-4750-9439-b80dea4a9310" (UID: "ab2cd371-182f-4750-9439-b80dea4a9310"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.690038 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ab2cd371-182f-4750-9439-b80dea4a9310" (UID: "ab2cd371-182f-4750-9439-b80dea4a9310"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.692029 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config" (OuterVolumeSpecName: "config") pod "ab2cd371-182f-4750-9439-b80dea4a9310" (UID: "ab2cd371-182f-4750-9439-b80dea4a9310"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.693289 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ab2cd371-182f-4750-9439-b80dea4a9310" (UID: "ab2cd371-182f-4750-9439-b80dea4a9310"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725549 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzm74\" (UniqueName: \"kubernetes.io/projected/ab2cd371-182f-4750-9439-b80dea4a9310-kube-api-access-jzm74\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725583 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/11f3f545-5226-44e9-956b-1c79012e5a74-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725597 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725609 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99gl7\" (UniqueName: \"kubernetes.io/projected/11f3f545-5226-44e9-956b-1c79012e5a74-kube-api-access-99gl7\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725620 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725630 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2r7xq\" (UniqueName: \"kubernetes.io/projected/6cd2b520-4d17-4d96-a0d1-a6bdf242f973-kube-api-access-2r7xq\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725640 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725650 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb49q\" (UniqueName: \"kubernetes.io/projected/cb84efeb-f131-48fc-b283-c6cb0db40cbe-kube-api-access-cb49q\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725658 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725678 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f3f545-5226-44e9-956b-1c79012e5a74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725688 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.725698 5055 reconciler_common.go:293] "Volume 
detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab2cd371-182f-4750-9439-b80dea4a9310-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.761802 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-55cv5"] Oct 11 07:14:22 crc kubenswrapper[5055]: W1011 07:14:22.763666 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8184ab7_72ce_4337_9e27_a04849f1e796.slice/crio-7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962 WatchSource:0}: Error finding container 7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962: Status 404 returned error can't find the container with id 7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962 Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.897076 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" event={"ID":"ab2cd371-182f-4750-9439-b80dea4a9310","Type":"ContainerDied","Data":"e7d2242b7073148a351af59574cfff89e46a8df720907bd2e11e6e6087496d5a"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.897128 5055 scope.go:117] "RemoveContainer" containerID="02982624e7fec032027db301139bfe8b2893cafac8ee3fb79205d98e6447ab84" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.897233 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b587f8db7-7qnbh" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.901256 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6lhht" event={"ID":"11f3f545-5226-44e9-956b-1c79012e5a74","Type":"ContainerDied","Data":"45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.901300 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45b05906ba3cb5ba034eaace1399807f4ac55befd3b78fb958c1a54907e17e39" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.901364 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6lhht" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.916528 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-55cv5" event={"ID":"e8184ab7-72ce-4337-9e27-a04849f1e796","Type":"ContainerStarted","Data":"7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.918544 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0232-account-create-rnfzp" event={"ID":"6cd2b520-4d17-4d96-a0d1-a6bdf242f973","Type":"ContainerDied","Data":"97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.918579 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97766415c077b62e200ea72e8932a0f8c4a08e397664c29efbe6d42d9d3ab025" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.918643 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-0232-account-create-rnfzp" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.937276 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bk7mn" event={"ID":"4c34e566-ca8a-4c34-a24e-5401cf63666b","Type":"ContainerStarted","Data":"fe652d39e7e6e3d258f7f666f03e71db8cb9fbfd39a1541489d6565528bf31fc"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.939408 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b491-account-create-rmjgx" event={"ID":"cb84efeb-f131-48fc-b283-c6cb0db40cbe","Type":"ContainerDied","Data":"9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.939432 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b491-account-create-rmjgx" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.939434 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f46ef797c3522c46a02edc7db9cae210e3c26b3ca4b12103f03a45f305e28cb" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.952781 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerStarted","Data":"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d"} Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.966962 5055 scope.go:117] "RemoveContainer" containerID="fe4d85793b7889616858f460e97af72194d7d4116c8bb6b285c6ab826f170a81" Oct 11 07:14:22 crc kubenswrapper[5055]: I1011 07:14:22.981964 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.051698 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.051744 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b587f8db7-7qnbh"] Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.052808 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-bk7mn" podStartSLOduration=2.555623346 podStartE2EDuration="11.052787813s" podCreationTimestamp="2025-10-11 07:14:12 +0000 UTC" firstStartedPulling="2025-10-11 07:14:13.808577473 +0000 UTC m=+1237.582851270" lastFinishedPulling="2025-10-11 07:14:22.30574193 +0000 UTC m=+1246.080015737" observedRunningTime="2025-10-11 07:14:22.990045888 +0000 UTC m=+1246.764319695" watchObservedRunningTime="2025-10-11 07:14:23.052787813 +0000 UTC m=+1246.827061620" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063268 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9f95b955d-vptlr"] Oct 11 07:14:23 crc kubenswrapper[5055]: E1011 07:14:23.063571 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="dnsmasq-dns" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063588 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="dnsmasq-dns" Oct 11 07:14:23 crc kubenswrapper[5055]: E1011 07:14:23.063597 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb84efeb-f131-48fc-b283-c6cb0db40cbe" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063604 5055 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="cb84efeb-f131-48fc-b283-c6cb0db40cbe" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: E1011 07:14:23.063622 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11f3f545-5226-44e9-956b-1c79012e5a74" containerName="placement-db-sync" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063629 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="11f3f545-5226-44e9-956b-1c79012e5a74" containerName="placement-db-sync" Oct 11 07:14:23 crc kubenswrapper[5055]: E1011 07:14:23.063647 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd2b520-4d17-4d96-a0d1-a6bdf242f973" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063653 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd2b520-4d17-4d96-a0d1-a6bdf242f973" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: E1011 07:14:23.063667 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="init" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063673 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="init" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063853 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" containerName="dnsmasq-dns" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063867 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="11f3f545-5226-44e9-956b-1c79012e5a74" containerName="placement-db-sync" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063884 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb84efeb-f131-48fc-b283-c6cb0db40cbe" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.063898 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd2b520-4d17-4d96-a0d1-a6bdf242f973" containerName="mariadb-account-create" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.064678 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.068830 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9f95b955d-vptlr"] Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.069476 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.069544 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.069966 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-6jp7q" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.070119 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.070267 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.237828 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238261 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238338 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238376 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238418 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qvcv\" (UniqueName: \"kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238483 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.238528 5055 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.343328 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.343445 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.343836 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.343484 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.344475 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.344509 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qvcv\" (UniqueName: \"kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.344538 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.344565 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.348796 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.349294 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.351492 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.352533 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.354200 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.361697 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qvcv\" (UniqueName: \"kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv\") pod \"placement-9f95b955d-vptlr\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.471692 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.518276 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.968487 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerStarted","Data":"1a9dd45ee3e83283bc4c67687d9a75523999a033d2bab86f134ae42061e8bef0"} Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.969100 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerStarted","Data":"bdec8129aeaf64c9cca57c44845abb536e328d18fa6aeea9b86d28616395ddd1"} Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.971027 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerStarted","Data":"ab3ec22e7b582a44411c57313be600437c73255d5832aae070c316fc9ef8e094"} Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.971453 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9f95b955d-vptlr"] Oct 11 07:14:23 crc kubenswrapper[5055]: W1011 07:14:23.972074 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7e77464_d0e5_4a9b_97a1_557cac810654.slice/crio-6d82590db7bcb61c86326bd78a7086f9f1282ceb3a193f4a5f2b963338ffbf54 WatchSource:0}: Error finding container 6d82590db7bcb61c86326bd78a7086f9f1282ceb3a193f4a5f2b963338ffbf54: Status 404 returned error can't find the container with id 6d82590db7bcb61c86326bd78a7086f9f1282ceb3a193f4a5f2b963338ffbf54 Oct 11 07:14:23 crc kubenswrapper[5055]: I1011 07:14:23.978482 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-55cv5" event={"ID":"e8184ab7-72ce-4337-9e27-a04849f1e796","Type":"ContainerStarted","Data":"47ded605de4372a0e91cff570a44e5fbe01c76b283e1b8e5907eeaa5d52389bc"} Oct 11 07:14:24 crc kubenswrapper[5055]: I1011 07:14:24.005547 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-55cv5" podStartSLOduration=6.005527364 podStartE2EDuration="6.005527364s" podCreationTimestamp="2025-10-11 07:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:23.995173421 +0000 UTC m=+1247.769447248" watchObservedRunningTime="2025-10-11 07:14:24.005527364 +0000 UTC m=+1247.779801181" Oct 11 07:14:24 crc kubenswrapper[5055]: I1011 07:14:24.988596 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerStarted","Data":"cb16ae1b9b332802488fc88c04afb8c3ed39071dd545eb4b99255d83d4e3c16a"} Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016279 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab2cd371-182f-4750-9439-b80dea4a9310" path="/var/lib/kubelet/pods/ab2cd371-182f-4750-9439-b80dea4a9310/volumes" Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016345 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.016328257 
podStartE2EDuration="7.016328257s" podCreationTimestamp="2025-10-11 07:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:25.011573262 +0000 UTC m=+1248.785847069" watchObservedRunningTime="2025-10-11 07:14:25.016328257 +0000 UTC m=+1248.790602064" Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016906 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016944 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016955 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerStarted","Data":"82a484cebdaeca44b956dd6026c1a5e65bf7f6fa2ec01d1293e086f33d6250f6"} Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016968 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerStarted","Data":"8d5ba7c659e482e9feef4958452dbc1a0c5e75bab6d21a07960f5948ca9ca733"} Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.016977 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerStarted","Data":"6d82590db7bcb61c86326bd78a7086f9f1282ceb3a193f4a5f2b963338ffbf54"} Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.017013 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerStarted","Data":"c9b70bda0cf162db2afe12d3f3dd8a8cb6c6497d562f02b62d86cb61ba89eac8"} Oct 11 07:14:25 crc kubenswrapper[5055]: I1011 07:14:25.032045 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-9f95b955d-vptlr" podStartSLOduration=3.032028981 podStartE2EDuration="3.032028981s" podCreationTimestamp="2025-10-11 07:14:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:25.029706905 +0000 UTC m=+1248.803980712" watchObservedRunningTime="2025-10-11 07:14:25.032028981 +0000 UTC m=+1248.806302788" Oct 11 07:14:26 crc kubenswrapper[5055]: I1011 07:14:26.018178 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerStarted","Data":"d5026e4621b733cfc25e44d6134706b5fa3663e90b2437e596f40565201fe479"} Oct 11 07:14:26 crc kubenswrapper[5055]: I1011 07:14:26.053567 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.053550298 podStartE2EDuration="8.053550298s" podCreationTimestamp="2025-10-11 07:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:26.043902055 +0000 UTC m=+1249.818175862" watchObservedRunningTime="2025-10-11 07:14:26.053550298 +0000 UTC m=+1249.827824105" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.655892 5055 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/cinder-db-sync-hjxfj"] Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.657253 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.661725 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.661904 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-b8f8l" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.662133 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.670122 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hjxfj"] Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744418 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744487 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744553 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744577 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744606 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.744651 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f5t9\" (UniqueName: \"kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846242 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle\") pod \"cinder-db-sync-hjxfj\" (UID: 
\"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846288 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846315 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846349 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f5t9\" (UniqueName: \"kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846400 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.846439 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.847587 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.852662 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.855897 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.856722 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.871453 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5f5t9\" (UniqueName: \"kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.879304 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data\") pod \"cinder-db-sync-hjxfj\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.879572 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-jb7ms"] Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.880664 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.884191 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mr5gz" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.884461 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.884865 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.889948 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jb7ms"] Oct 11 07:14:27 crc kubenswrapper[5055]: I1011 07:14:27.980805 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.049375 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.049702 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px942\" (UniqueName: \"kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.049864 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.151740 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px942\" (UniqueName: \"kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.152632 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.152833 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.162595 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.167605 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.175672 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px942\" (UniqueName: \"kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942\") pod \"neutron-db-sync-jb7ms\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:28 crc kubenswrapper[5055]: I1011 07:14:28.265643 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.041267 5055 generic.go:334] "Generic (PLEG): container finished" podID="e8184ab7-72ce-4337-9e27-a04849f1e796" containerID="47ded605de4372a0e91cff570a44e5fbe01c76b283e1b8e5907eeaa5d52389bc" exitCode=0 Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.041306 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-55cv5" event={"ID":"e8184ab7-72ce-4337-9e27-a04849f1e796","Type":"ContainerDied","Data":"47ded605de4372a0e91cff570a44e5fbe01c76b283e1b8e5907eeaa5d52389bc"} Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.324693 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.324759 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.365019 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.375961 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.379149 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.379224 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 
07:14:29.412005 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:29 crc kubenswrapper[5055]: I1011 07:14:29.438434 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.051067 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.051429 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.051448 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.051462 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.245622 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hjxfj"] Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.325753 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jb7ms"] Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.452951 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.593920 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hf8p9\" (UniqueName: \"kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9\") pod \"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.593980 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle\") pod \"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.594016 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys\") pod \"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.594086 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys\") pod \"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.594223 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data\") pod \"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.594247 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts\") pod 
\"e8184ab7-72ce-4337-9e27-a04849f1e796\" (UID: \"e8184ab7-72ce-4337-9e27-a04849f1e796\") " Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.599422 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9" (OuterVolumeSpecName: "kube-api-access-hf8p9") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "kube-api-access-hf8p9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.599519 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.601809 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts" (OuterVolumeSpecName: "scripts") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.602369 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.631881 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.634890 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data" (OuterVolumeSpecName: "config-data") pod "e8184ab7-72ce-4337-9e27-a04849f1e796" (UID: "e8184ab7-72ce-4337-9e27-a04849f1e796"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697678 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697710 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697743 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hf8p9\" (UniqueName: \"kubernetes.io/projected/e8184ab7-72ce-4337-9e27-a04849f1e796-kube-api-access-hf8p9\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697756 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697785 5055 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:30 crc kubenswrapper[5055]: I1011 07:14:30.697794 5055 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e8184ab7-72ce-4337-9e27-a04849f1e796-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.060569 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hjxfj" event={"ID":"6ba129d1-5c6b-41a7-9a7f-d469ad919b75","Type":"ContainerStarted","Data":"766f5d06bb7d9601c08fc7a0fe49a4a5caa6e2d23b469cd2ef36b1658074922a"} Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.062807 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerStarted","Data":"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016"} Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.064031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jb7ms" event={"ID":"c649c0a0-d654-447f-9558-4b0376e1beaf","Type":"ContainerStarted","Data":"81e8ee154acdddce8902a40492fdd451667f1d4ec00f3aebf1bba9058dae907a"} Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.064065 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jb7ms" event={"ID":"c649c0a0-d654-447f-9558-4b0376e1beaf","Type":"ContainerStarted","Data":"dcb183836a7b9b5215666671803c3ac232b9bffd68640c65b75b1dc55ceda1e3"} Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.066877 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-55cv5" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.066926 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-55cv5" event={"ID":"e8184ab7-72ce-4337-9e27-a04849f1e796","Type":"ContainerDied","Data":"7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962"} Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.068131 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d70199ce427c97f89132348c316089ecc92bdabe32969875ac2119851bb9962" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.094584 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-jb7ms" podStartSLOduration=4.094564938 podStartE2EDuration="4.094564938s" podCreationTimestamp="2025-10-11 07:14:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:31.078203625 +0000 UTC m=+1254.852477452" watchObservedRunningTime="2025-10-11 07:14:31.094564938 +0000 UTC m=+1254.868838735" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.136382 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:14:31 crc kubenswrapper[5055]: E1011 07:14:31.137018 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8184ab7-72ce-4337-9e27-a04849f1e796" containerName="keystone-bootstrap" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.137044 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8184ab7-72ce-4337-9e27-a04849f1e796" containerName="keystone-bootstrap" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.137323 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8184ab7-72ce-4337-9e27-a04849f1e796" containerName="keystone-bootstrap" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.138083 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.143080 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v2rpk" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.143271 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.143379 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.143484 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.143778 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.148497 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.158801 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.207861 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.207924 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.207979 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6nlk\" (UniqueName: \"kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.208004 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.208049 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.208103 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: 
\"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.208267 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.208314 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309592 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309699 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309737 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309811 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309837 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309874 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6nlk\" (UniqueName: \"kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309896 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 
11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.309935 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.316252 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.316648 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.318369 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.319213 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.321534 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.332631 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.333241 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.341341 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6nlk\" (UniqueName: \"kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk\") pod \"keystone-867958d55b-prp6q\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:31 crc kubenswrapper[5055]: I1011 07:14:31.474324 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.137850 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:14:32 crc kubenswrapper[5055]: W1011 07:14:32.143709 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5108504f_f2dd_4f43_8d7a_f630e055d661.slice/crio-f181c4ee9b4630984c1970b1f87eb8b3233446ded580b2a186ef858f60c78ea9 WatchSource:0}: Error finding container f181c4ee9b4630984c1970b1f87eb8b3233446ded580b2a186ef858f60c78ea9: Status 404 returned error can't find the container with id f181c4ee9b4630984c1970b1f87eb8b3233446ded580b2a186ef858f60c78ea9 Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.317413 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.317847 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.422827 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.422884 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.547701 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.547841 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.607359 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 11 07:14:32 crc kubenswrapper[5055]: I1011 07:14:32.648651 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.087335 5055 generic.go:334] "Generic (PLEG): container finished" podID="4c34e566-ca8a-4c34-a24e-5401cf63666b" containerID="fe652d39e7e6e3d258f7f666f03e71db8cb9fbfd39a1541489d6565528bf31fc" exitCode=0 Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.087419 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bk7mn" event={"ID":"4c34e566-ca8a-4c34-a24e-5401cf63666b","Type":"ContainerDied","Data":"fe652d39e7e6e3d258f7f666f03e71db8cb9fbfd39a1541489d6565528bf31fc"} Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.091734 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-867958d55b-prp6q" event={"ID":"5108504f-f2dd-4f43-8d7a-f630e055d661","Type":"ContainerStarted","Data":"e8e6bf3ea958d93967bb08adda0bad8a6b27f850a70be0cd9489070df6fd455e"} Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.091805 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-867958d55b-prp6q" event={"ID":"5108504f-f2dd-4f43-8d7a-f630e055d661","Type":"ContainerStarted","Data":"f181c4ee9b4630984c1970b1f87eb8b3233446ded580b2a186ef858f60c78ea9"} Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.092386 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:14:33 crc kubenswrapper[5055]: I1011 07:14:33.126859 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-867958d55b-prp6q" podStartSLOduration=2.126834606 podStartE2EDuration="2.126834606s" podCreationTimestamp="2025-10-11 07:14:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:33.121949028 +0000 UTC m=+1256.896222835" watchObservedRunningTime="2025-10-11 07:14:33.126834606 +0000 UTC m=+1256.901108413" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.590944 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.788163 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hp8ht\" (UniqueName: \"kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht\") pod \"4c34e566-ca8a-4c34-a24e-5401cf63666b\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.788562 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle\") pod \"4c34e566-ca8a-4c34-a24e-5401cf63666b\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.788612 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data\") pod \"4c34e566-ca8a-4c34-a24e-5401cf63666b\" (UID: \"4c34e566-ca8a-4c34-a24e-5401cf63666b\") " Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.795460 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht" (OuterVolumeSpecName: "kube-api-access-hp8ht") pod "4c34e566-ca8a-4c34-a24e-5401cf63666b" (UID: "4c34e566-ca8a-4c34-a24e-5401cf63666b"). InnerVolumeSpecName "kube-api-access-hp8ht". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.798297 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4c34e566-ca8a-4c34-a24e-5401cf63666b" (UID: "4c34e566-ca8a-4c34-a24e-5401cf63666b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.824402 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c34e566-ca8a-4c34-a24e-5401cf63666b" (UID: "4c34e566-ca8a-4c34-a24e-5401cf63666b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.890535 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.890570 5055 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c34e566-ca8a-4c34-a24e-5401cf63666b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:34 crc kubenswrapper[5055]: I1011 07:14:34.890582 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hp8ht\" (UniqueName: \"kubernetes.io/projected/4c34e566-ca8a-4c34-a24e-5401cf63666b-kube-api-access-hp8ht\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.112408 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bk7mn" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.112893 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bk7mn" event={"ID":"4c34e566-ca8a-4c34-a24e-5401cf63666b","Type":"ContainerDied","Data":"5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2"} Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.112918 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b7cd1b4fadd3f5483b21b2ad2da8f7536b063aafb6728e08d4588ef9bbfc4e2" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.250857 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:14:35 crc kubenswrapper[5055]: E1011 07:14:35.251401 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c34e566-ca8a-4c34-a24e-5401cf63666b" containerName="barbican-db-sync" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.251428 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c34e566-ca8a-4c34-a24e-5401cf63666b" containerName="barbican-db-sync" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.251674 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c34e566-ca8a-4c34-a24e-5401cf63666b" containerName="barbican-db-sync" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.252850 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.263435 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.263919 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-vf7lq" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.264098 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.267000 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.268859 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.270239 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.281312 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.289926 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.372896 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.374612 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.402609 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404635 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404690 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l66tk\" (UniqueName: \"kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404800 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404823 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404846 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404860 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: 
\"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404891 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404916 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.404969 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr6n5\" (UniqueName: \"kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.405105 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.489238 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.495341 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.500937 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.502195 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.507697 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.507893 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.508008 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.508088 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509135 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509262 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509331 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr6n5\" (UniqueName: \"kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509408 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom\") pod 
\"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509491 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509586 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509668 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k8wg\" (UniqueName: \"kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509735 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509819 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.509894 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l66tk\" (UniqueName: \"kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.510277 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.510506 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.510547 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.510975 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.513263 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.517676 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.518858 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.519185 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.527188 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.530861 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l66tk\" (UniqueName: \"kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk\") pod \"barbican-keystone-listener-68ddb9fb98-g6thb\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.534523 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr6n5\" (UniqueName: \"kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.537659 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data\") pod \"barbican-worker-7fffd4488c-r5vkt\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.594219 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616531 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616579 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9f42\" (UniqueName: \"kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616790 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616837 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616863 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616885 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.616982 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.617013 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config\") pod 
\"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.617041 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k8wg\" (UniqueName: \"kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.617061 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.617082 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.617957 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.618332 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.618511 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.618958 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.619407 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.621635 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.635816 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k8wg\" (UniqueName: \"kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg\") pod \"dnsmasq-dns-54f9cb888f-jslms\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.709185 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.718618 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.718663 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.718726 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.718789 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.718815 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9f42\" (UniqueName: \"kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.719141 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.722072 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.722982 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.728971 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.734796 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9f42\" (UniqueName: \"kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42\") pod \"barbican-api-56c5d58b4-jhj26\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:35 crc kubenswrapper[5055]: I1011 07:14:35.905021 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.898695 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.900538 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.903906 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.904289 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.921579 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964673 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8d8rc\" (UniqueName: \"kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964721 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964823 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964865 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs\") pod 
\"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964894 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964925 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:37 crc kubenswrapper[5055]: I1011 07:14:37.964985 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.067175 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.067377 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.067406 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8d8rc\" (UniqueName: \"kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.067440 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.067519 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.068089 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs\") pod 
\"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.068330 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.068389 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.073175 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.073887 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.074001 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.074565 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.076553 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.085466 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8d8rc\" (UniqueName: \"kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc\") pod \"barbican-api-66cb796856-btqn8\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:38 crc kubenswrapper[5055]: I1011 07:14:38.218322 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:48 crc kubenswrapper[5055]: E1011 07:14:48.643124 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24@sha256:e91d58021b54c46883595ff66be65882de54abdb3be2ca53c4162b20d18b5f48" Oct 11 07:14:48 crc kubenswrapper[5055]: E1011 07:14:48.643912 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24@sha256:e91d58021b54c46883595ff66be65882de54abdb3be2ca53c4162b20d18b5f48,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7pwv2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3908605d-c4b6-4df0-84a7-1137a9928b09): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 11 07:14:48 crc kubenswrapper[5055]: E1011 07:14:48.645055 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" Oct 11 07:14:49 crc kubenswrapper[5055]: I1011 07:14:49.255662 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-central-agent" containerID="cri-o://b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b" gracePeriod=30 Oct 11 07:14:49 crc kubenswrapper[5055]: I1011 07:14:49.255724 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="sg-core" containerID="cri-o://c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" gracePeriod=30 Oct 11 07:14:49 crc kubenswrapper[5055]: I1011 07:14:49.255790 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-notification-agent" containerID="cri-o://9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d" gracePeriod=30 Oct 11 07:14:49 crc kubenswrapper[5055]: E1011 07:14:49.702786 5055 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f" Oct 11 07:14:49 crc kubenswrapper[5055]: E1011 07:14:49.703263 5055 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
Oct 11 07:14:49 crc kubenswrapper[5055]: E1011 07:14:49.704477 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-hjxfj" podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75"
Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.248921 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"]
Oct 11 07:14:50 crc kubenswrapper[5055]: W1011 07:14:50.255240 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc46cb7e1_610f_4887_8d0e_52d085b0d7f0.slice/crio-d8ae881ccb87a387e1da0251a6e04ed35d6f24a2b9f8a54e79db4444e857db9d WatchSource:0}: Error finding container d8ae881ccb87a387e1da0251a6e04ed35d6f24a2b9f8a54e79db4444e857db9d: Status 404 returned error can't find the container with id d8ae881ccb87a387e1da0251a6e04ed35d6f24a2b9f8a54e79db4444e857db9d
Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.265164 5055 generic.go:334] "Generic (PLEG): container finished" podID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerID="c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" exitCode=2
container finished" podID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerID="c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" exitCode=2 Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.265191 5055 generic.go:334] "Generic (PLEG): container finished" podID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerID="b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b" exitCode=0 Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.265221 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerDied","Data":"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016"} Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.265243 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerDied","Data":"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b"} Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.267171 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" event={"ID":"c46cb7e1-610f-4887-8d0e-52d085b0d7f0","Type":"ContainerStarted","Data":"d8ae881ccb87a387e1da0251a6e04ed35d6f24a2b9f8a54e79db4444e857db9d"} Oct 11 07:14:50 crc kubenswrapper[5055]: E1011 07:14:50.269459 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:85c75d60e1bd2f8a9ea0a2bb21a8df64c0a6f7b504cc1a05a355981d4b90e92f\\\"\"" pod="openstack/cinder-db-sync-hjxfj" podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.312716 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.319561 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:14:50 crc kubenswrapper[5055]: W1011 07:14:50.320181 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bd3f46f_87e3_42e7_a37d_e746fb601f60.slice/crio-8f011a1e8c65e1d3151d79b7e649db7839c472218f188896199cf1e02f7d55de WatchSource:0}: Error finding container 8f011a1e8c65e1d3151d79b7e649db7839c472218f188896199cf1e02f7d55de: Status 404 returned error can't find the container with id 8f011a1e8c65e1d3151d79b7e649db7839c472218f188896199cf1e02f7d55de Oct 11 07:14:50 crc kubenswrapper[5055]: W1011 07:14:50.448688 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54c2e818_4fd5_44b3_9bcc_e08b813f83bd.slice/crio-9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b WatchSource:0}: Error finding container 9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b: Status 404 returned error can't find the container with id 9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.450086 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:14:50 crc kubenswrapper[5055]: I1011 07:14:50.457813 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:14:51 crc 
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246067 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246333 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246404 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pwv2\" (UniqueName: \"kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246540 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246566 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246647 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.246689 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd\") pod \"3908605d-c4b6-4df0-84a7-1137a9928b09\" (UID: \"3908605d-c4b6-4df0-84a7-1137a9928b09\") "
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.247307 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.247593 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.250056 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2" (OuterVolumeSpecName: "kube-api-access-7pwv2") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "kube-api-access-7pwv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.250917 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts" (OuterVolumeSpecName: "scripts") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.275861 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.283040 5055 generic.go:334] "Generic (PLEG): container finished" podID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerID="235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8" exitCode=0 Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.283109 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" event={"ID":"c46cb7e1-610f-4887-8d0e-52d085b0d7f0","Type":"ContainerDied","Data":"235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.297977 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerStarted","Data":"c1d684263aaf87efbcea506b560a28b1b946789bf372a768d36acec5fea4d022"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.298022 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerStarted","Data":"5acf172225cdf40a6f216adcb336b961ebecebc35c031e3ef6b0bdf717685fd2"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.298031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerStarted","Data":"9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.298883 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.298901 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.308884 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" 
(UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348895 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pwv2\" (UniqueName: \"kubernetes.io/projected/3908605d-c4b6-4df0-84a7-1137a9928b09-kube-api-access-7pwv2\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348933 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348948 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348960 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3908605d-c4b6-4df0-84a7-1137a9928b09-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348971 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.348983 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.349762 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerStarted","Data":"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.350017 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerStarted","Data":"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.350031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerStarted","Data":"84f912d588b362be8f4dff4b2de01bd13e69ccf55752b1ce067cbae1a93601ce"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.351027 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.351067 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.356900 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data" (OuterVolumeSpecName: "config-data") pod "3908605d-c4b6-4df0-84a7-1137a9928b09" (UID: "3908605d-c4b6-4df0-84a7-1137a9928b09"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.361144 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerStarted","Data":"8f011a1e8c65e1d3151d79b7e649db7839c472218f188896199cf1e02f7d55de"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.364255 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-56c5d58b4-jhj26" podStartSLOduration=16.364236215 podStartE2EDuration="16.364236215s" podCreationTimestamp="2025-10-11 07:14:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:51.330336606 +0000 UTC m=+1275.104610413" watchObservedRunningTime="2025-10-11 07:14:51.364236215 +0000 UTC m=+1275.138510012" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.365099 5055 generic.go:334] "Generic (PLEG): container finished" podID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerID="9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d" exitCode=0 Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.365162 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerDied","Data":"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.365194 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3908605d-c4b6-4df0-84a7-1137a9928b09","Type":"ContainerDied","Data":"9dc29d93eff33329f28fa4fe8fa49ca84374f675cb9b53bc7557fb53ba019d8a"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.365216 5055 scope.go:117] "RemoveContainer" containerID="c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.365360 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.381357 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerStarted","Data":"64228fa0248689ac70aec2777d6d69b8da2c68388d2ccbd55401815b2f00144b"} Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.394557 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-66cb796856-btqn8" podStartSLOduration=14.394533592 podStartE2EDuration="14.394533592s" podCreationTimestamp="2025-10-11 07:14:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:51.372093397 +0000 UTC m=+1275.146367204" watchObservedRunningTime="2025-10-11 07:14:51.394533592 +0000 UTC m=+1275.168807399" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.460659 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3908605d-c4b6-4df0-84a7-1137a9928b09-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.475450 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.511025 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.516724 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:51 crc kubenswrapper[5055]: E1011 07:14:51.517054 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-central-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517073 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-central-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: E1011 07:14:51.517111 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="sg-core" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517118 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="sg-core" Oct 11 07:14:51 crc kubenswrapper[5055]: E1011 07:14:51.517135 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-notification-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517144 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-notification-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517287 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="sg-core" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517313 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-central-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.517329 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" containerName="ceilometer-notification-agent" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 
07:14:51.518815 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.526980 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.527853 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.536340 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563018 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563090 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql87c\" (UniqueName: \"kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563114 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563156 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563181 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563257 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.563276 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676326 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" 
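The podStartSLOduration entries above come from the startup latency tracker: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and the SLO figure additionally excludes the image-pull window; when nothing was pulled, the pull timestamps stay at the zero sentinel 0001-01-01 and the two durations coincide. A small check of the barbican-api-56c5d58b4-jhj26 entry, assuming a plain time.Parse of the logged timestamps:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2025-10-11 07:14:35 +0000 UTC")          // podCreationTimestamp
        running := parse("2025-10-11 07:14:51.364236215 +0000 UTC") // watchObservedRunningTime

        // With the zero-value sentinels for the image-pull timestamps,
        // no pull time is subtracted and the SLO duration equals E2E.
        e2e := running.Sub(created)
        fmt.Println(e2e.Seconds()) // 16.364236215, matching podStartE2EDuration
    }

For the barbican-worker entry further down, which did pull an image, the same arithmetic applies with the pull window (firstStartedPulling to lastFinishedPulling, about 2.77s) subtracted from the 19.46s end-to-end figure, giving the logged ~16.69s SLO duration.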
Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676379 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676457 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676470 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676550 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676577 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql87c\" (UniqueName: \"kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.676594 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.678107 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.678142 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.685176 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.685369 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.685932 
5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.691645 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.693509 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql87c\" (UniqueName: \"kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c\") pod \"ceilometer-0\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " pod="openstack/ceilometer-0" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.740077 5055 scope.go:117] "RemoveContainer" containerID="9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d" Oct 11 07:14:51 crc kubenswrapper[5055]: I1011 07:14:51.836290 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:14:52 crc kubenswrapper[5055]: I1011 07:14:52.991969 5055 scope.go:117] "RemoveContainer" containerID="b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.006057 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3908605d-c4b6-4df0-84a7-1137a9928b09" path="/var/lib/kubelet/pods/3908605d-c4b6-4df0-84a7-1137a9928b09/volumes" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.065891 5055 scope.go:117] "RemoveContainer" containerID="c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" Oct 11 07:14:53 crc kubenswrapper[5055]: E1011 07:14:53.066554 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016\": container with ID starting with c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016 not found: ID does not exist" containerID="c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.066663 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016"} err="failed to get container status \"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016\": rpc error: code = NotFound desc = could not find container \"c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016\": container with ID starting with c17bd1759f4a63211e843407e2aa025bf481f7ffb868e53ebfcfd77e2db4a016 not found: ID does not exist" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.066829 5055 scope.go:117] "RemoveContainer" containerID="9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d" Oct 11 07:14:53 crc kubenswrapper[5055]: E1011 07:14:53.067343 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d\": container with ID starting with 9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d not found: 
ID does not exist" containerID="9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.067398 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d"} err="failed to get container status \"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d\": rpc error: code = NotFound desc = could not find container \"9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d\": container with ID starting with 9234f535538a79d8c0d5a8de448290234ec0ec0b2d6be0b128ca234b1ee96c3d not found: ID does not exist" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.067434 5055 scope.go:117] "RemoveContainer" containerID="b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b" Oct 11 07:14:53 crc kubenswrapper[5055]: E1011 07:14:53.067852 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b\": container with ID starting with b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b not found: ID does not exist" containerID="b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.067985 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b"} err="failed to get container status \"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b\": rpc error: code = NotFound desc = could not find container \"b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b\": container with ID starting with b6883db390a82949d295202200b69f3df1adddf2c18d6dbfde9b90393a0fb99b not found: ID does not exist" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.402893 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerStarted","Data":"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4"} Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.404801 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" event={"ID":"c46cb7e1-610f-4887-8d0e-52d085b0d7f0","Type":"ContainerStarted","Data":"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3"} Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.404959 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.406969 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerStarted","Data":"5b162a0640d55adbc95b5344fee3401f099e891f722bbcd511184a279d6fc712"} Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.433921 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" podStartSLOduration=18.433897081 podStartE2EDuration="18.433897081s" podCreationTimestamp="2025-10-11 07:14:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:14:53.426613285 +0000 UTC 
m=+1277.200887102" watchObservedRunningTime="2025-10-11 07:14:53.433897081 +0000 UTC m=+1277.208170888" Oct 11 07:14:53 crc kubenswrapper[5055]: I1011 07:14:53.519376 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.424806 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerStarted","Data":"161a466facbf08e74bf08d457e0525bdf9302acd69cb47d57a131bf40297c7a8"} Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.427828 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerStarted","Data":"bbef988c8a62f3c6e4d85f950c06aad68deb511b896cfed32cb368904a01c90e"} Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.436926 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerStarted","Data":"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1"} Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.455094 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7fffd4488c-r5vkt" podStartSLOduration=16.686092958 podStartE2EDuration="19.455077558s" podCreationTimestamp="2025-10-11 07:14:35 +0000 UTC" firstStartedPulling="2025-10-11 07:14:50.324145272 +0000 UTC m=+1274.098419079" lastFinishedPulling="2025-10-11 07:14:53.093129882 +0000 UTC m=+1276.867403679" observedRunningTime="2025-10-11 07:14:54.445541038 +0000 UTC m=+1278.219814845" watchObservedRunningTime="2025-10-11 07:14:54.455077558 +0000 UTC m=+1278.229351365" Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.540283 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.540858 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:14:54 crc kubenswrapper[5055]: I1011 07:14:54.570457 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" podStartSLOduration=16.807351099 podStartE2EDuration="19.570436621s" podCreationTimestamp="2025-10-11 07:14:35 +0000 UTC" firstStartedPulling="2025-10-11 07:14:50.32863699 +0000 UTC m=+1274.102910797" lastFinishedPulling="2025-10-11 07:14:53.091722512 +0000 UTC m=+1276.865996319" observedRunningTime="2025-10-11 07:14:54.476606517 +0000 UTC m=+1278.250880314" watchObservedRunningTime="2025-10-11 07:14:54.570436621 +0000 UTC m=+1278.344710428" Oct 11 07:14:55 crc kubenswrapper[5055]: I1011 07:14:55.445135 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerStarted","Data":"50c8bf8be092aa5575e850de8cf541ccd2504f5ef84cb40d39f711bf8f67f741"} Oct 11 07:14:55 crc kubenswrapper[5055]: I1011 07:14:55.446019 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerStarted","Data":"544482de6387283242f4fb3c7aced088405f9691250b6885a7b699ebd124781e"} Oct 11 07:14:56 crc kubenswrapper[5055]: I1011 07:14:56.456594 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerStarted","Data":"4d2f8714dd7ca63d6ed1f16a977f4eeb14d2017a21fd69bd2909d022949a7e3e"} Oct 11 07:14:57 crc kubenswrapper[5055]: I1011 07:14:57.465330 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerStarted","Data":"2dfc44369f1cc20d8ffeb963d8c4f3181d8d04f6e640f9ae10071650e167e566"} Oct 11 07:14:57 crc kubenswrapper[5055]: I1011 07:14:57.465560 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 07:14:57 crc kubenswrapper[5055]: I1011 07:14:57.491645 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.172466483 podStartE2EDuration="6.491622905s" podCreationTimestamp="2025-10-11 07:14:51 +0000 UTC" firstStartedPulling="2025-10-11 07:14:53.535937097 +0000 UTC m=+1277.310210904" lastFinishedPulling="2025-10-11 07:14:56.855093519 +0000 UTC m=+1280.629367326" observedRunningTime="2025-10-11 07:14:57.487283463 +0000 UTC m=+1281.261557270" watchObservedRunningTime="2025-10-11 07:14:57.491622905 +0000 UTC m=+1281.265896712" Oct 11 07:14:57 crc kubenswrapper[5055]: I1011 07:14:57.585833 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:58 crc kubenswrapper[5055]: I1011 07:14:58.475053 5055 generic.go:334] "Generic (PLEG): container finished" podID="c649c0a0-d654-447f-9558-4b0376e1beaf" containerID="81e8ee154acdddce8902a40492fdd451667f1d4ec00f3aebf1bba9058dae907a" exitCode=0 Oct 11 07:14:58 crc kubenswrapper[5055]: I1011 07:14:58.475150 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jb7ms" event={"ID":"c649c0a0-d654-447f-9558-4b0376e1beaf","Type":"ContainerDied","Data":"81e8ee154acdddce8902a40492fdd451667f1d4ec00f3aebf1bba9058dae907a"} Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.095808 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.596411 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.675948 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.753278 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.753467 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56c5d58b4-jhj26" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api-log" containerID="cri-o://5acf172225cdf40a6f216adcb336b961ebecebc35c031e3ef6b0bdf717685fd2" gracePeriod=30 Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.753848 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-56c5d58b4-jhj26" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api" containerID="cri-o://c1d684263aaf87efbcea506b560a28b1b946789bf372a768d36acec5fea4d022" gracePeriod=30 Oct 11 07:14:59 crc kubenswrapper[5055]: I1011 07:14:59.955425 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.041265 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config\") pod \"c649c0a0-d654-447f-9558-4b0376e1beaf\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.041409 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle\") pod \"c649c0a0-d654-447f-9558-4b0376e1beaf\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.041523 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px942\" (UniqueName: \"kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942\") pod \"c649c0a0-d654-447f-9558-4b0376e1beaf\" (UID: \"c649c0a0-d654-447f-9558-4b0376e1beaf\") " Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.064970 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942" (OuterVolumeSpecName: "kube-api-access-px942") pod "c649c0a0-d654-447f-9558-4b0376e1beaf" (UID: "c649c0a0-d654-447f-9558-4b0376e1beaf"). InnerVolumeSpecName "kube-api-access-px942". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.078726 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config" (OuterVolumeSpecName: "config") pod "c649c0a0-d654-447f-9558-4b0376e1beaf" (UID: "c649c0a0-d654-447f-9558-4b0376e1beaf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.079168 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c649c0a0-d654-447f-9558-4b0376e1beaf" (UID: "c649c0a0-d654-447f-9558-4b0376e1beaf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.141055 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72"] Oct 11 07:15:00 crc kubenswrapper[5055]: E1011 07:15:00.141462 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c649c0a0-d654-447f-9558-4b0376e1beaf" containerName="neutron-db-sync" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.141478 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c649c0a0-d654-447f-9558-4b0376e1beaf" containerName="neutron-db-sync" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.141690 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c649c0a0-d654-447f-9558-4b0376e1beaf" containerName="neutron-db-sync" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.143427 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px942\" (UniqueName: \"kubernetes.io/projected/c649c0a0-d654-447f-9558-4b0376e1beaf-kube-api-access-px942\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.143465 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.143478 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c649c0a0-d654-447f-9558-4b0376e1beaf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.143648 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.147229 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.147341 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.148862 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.245414 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.245498 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.245640 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmt25\" (UniqueName: 
\"kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.347325 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.347404 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.347524 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmt25\" (UniqueName: \"kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.349317 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.354984 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.363831 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmt25\" (UniqueName: \"kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25\") pod \"collect-profiles-29336115-fns72\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.493406 5055 generic.go:334] "Generic (PLEG): container finished" podID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerID="5acf172225cdf40a6f216adcb336b961ebecebc35c031e3ef6b0bdf717685fd2" exitCode=143 Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.493510 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerDied","Data":"5acf172225cdf40a6f216adcb336b961ebecebc35c031e3ef6b0bdf717685fd2"} Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.495604 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jb7ms" 
event={"ID":"c649c0a0-d654-447f-9558-4b0376e1beaf","Type":"ContainerDied","Data":"dcb183836a7b9b5215666671803c3ac232b9bffd68640c65b75b1dc55ceda1e3"} Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.495641 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcb183836a7b9b5215666671803c3ac232b9bffd68640c65b75b1dc55ceda1e3" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.495642 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jb7ms" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.499181 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.684756 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.685070 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="dnsmasq-dns" containerID="cri-o://d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3" gracePeriod=10 Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.685944 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.712916 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.152:5353: connect: connection refused" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.744532 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.767774 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899218 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899334 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899357 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899402 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d5nq\" (UniqueName: \"kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899444 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.899509 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.929894 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.950460 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.952105 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.955381 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.962031 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.962145 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"] Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.962296 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-mr5gz" Oct 11 07:15:00 crc kubenswrapper[5055]: I1011 07:15:00.962786 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005717 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005777 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005810 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d5nq\" (UniqueName: \"kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005839 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005884 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.005931 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.006857 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " 
pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.007028 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.007543 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.008262 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.009883 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.033095 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d5nq\" (UniqueName: \"kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq\") pod \"dnsmasq-dns-5c78787df7-gwqf7\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") " pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.106949 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.106997 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.107029 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6f9w\" (UniqueName: \"kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.107051 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " 
pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.107087 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.125864 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.156795 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72"] Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.211538 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.211911 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6f9w\" (UniqueName: \"kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.211943 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.211995 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.212101 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.221431 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.224545 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.224597 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.225461 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.231671 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6f9w\" (UniqueName: \"kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w\") pod \"neutron-56f4bfc5b4-slbvq\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") " pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.278314 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.360200 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.508464 5055 generic.go:334] "Generic (PLEG): container finished" podID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerID="d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3" exitCode=0 Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.508551 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.508572 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" event={"ID":"c46cb7e1-610f-4887-8d0e-52d085b0d7f0","Type":"ContainerDied","Data":"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3"} Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.509712 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54f9cb888f-jslms" event={"ID":"c46cb7e1-610f-4887-8d0e-52d085b0d7f0","Type":"ContainerDied","Data":"d8ae881ccb87a387e1da0251a6e04ed35d6f24a2b9f8a54e79db4444e857db9d"} Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.509733 5055 scope.go:117] "RemoveContainer" containerID="d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.512652 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" event={"ID":"0a67a7ab-47f4-4dc8-a49e-249233f62627","Type":"ContainerStarted","Data":"8c416bca97c5a5cb8a7638a9c0d02e9ef2dba2faabf4aceaa45dfa2911788b14"} Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.512712 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" event={"ID":"0a67a7ab-47f4-4dc8-a49e-249233f62627","Type":"ContainerStarted","Data":"9c3ffdf8041718fa0951068b5774b418568a6ac1f9284aae8f16b8a013c27d08"} Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517355 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517482 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k8wg\" (UniqueName: \"kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517573 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517649 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517683 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.517748 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb\") pod \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\" (UID: \"c46cb7e1-610f-4887-8d0e-52d085b0d7f0\") " Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.521949 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg" (OuterVolumeSpecName: "kube-api-access-8k8wg") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). InnerVolumeSpecName "kube-api-access-8k8wg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.532890 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" podStartSLOduration=1.532871814 podStartE2EDuration="1.532871814s" podCreationTimestamp="2025-10-11 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:01.532465162 +0000 UTC m=+1285.306738989" watchObservedRunningTime="2025-10-11 07:15:01.532871814 +0000 UTC m=+1285.307145641" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.578201 5055 scope.go:117] "RemoveContainer" containerID="235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.588465 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.588930 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config" (OuterVolumeSpecName: "config") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.608956 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.610311 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.618145 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c46cb7e1-610f-4887-8d0e-52d085b0d7f0" (UID: "c46cb7e1-610f-4887-8d0e-52d085b0d7f0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620392 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620412 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620439 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620473 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k8wg\" (UniqueName: \"kubernetes.io/projected/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-kube-api-access-8k8wg\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620483 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.620491 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c46cb7e1-610f-4887-8d0e-52d085b0d7f0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.642432 5055 scope.go:117] "RemoveContainer" 
containerID="d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3" Oct 11 07:15:01 crc kubenswrapper[5055]: E1011 07:15:01.661385 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3\": container with ID starting with d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3 not found: ID does not exist" containerID="d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.661432 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3"} err="failed to get container status \"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3\": rpc error: code = NotFound desc = could not find container \"d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3\": container with ID starting with d77478ad861433120dd0a676dc75cdf4d51bc6d76b6c32f200236911fe9744b3 not found: ID does not exist" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.661471 5055 scope.go:117] "RemoveContainer" containerID="235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8" Oct 11 07:15:01 crc kubenswrapper[5055]: E1011 07:15:01.666952 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8\": container with ID starting with 235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8 not found: ID does not exist" containerID="235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.667008 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8"} err="failed to get container status \"235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8\": rpc error: code = NotFound desc = could not find container \"235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8\": container with ID starting with 235343b3153196a77772912ee66a994a965f2096d5af94749ae6248f0dec63f8 not found: ID does not exist" Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.671581 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"] Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.839485 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"] Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.848988 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54f9cb888f-jslms"] Oct 11 07:15:01 crc kubenswrapper[5055]: I1011 07:15:01.904382 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"] Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.422643 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.423021 5055 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.525157 5055 generic.go:334] "Generic (PLEG): container finished" podID="0a67a7ab-47f4-4dc8-a49e-249233f62627" containerID="8c416bca97c5a5cb8a7638a9c0d02e9ef2dba2faabf4aceaa45dfa2911788b14" exitCode=0 Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.525227 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" event={"ID":"0a67a7ab-47f4-4dc8-a49e-249233f62627","Type":"ContainerDied","Data":"8c416bca97c5a5cb8a7638a9c0d02e9ef2dba2faabf4aceaa45dfa2911788b14"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.531644 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerStarted","Data":"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.531912 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerStarted","Data":"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.531994 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.532053 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerStarted","Data":"1ec92de3bd7a581223542a45214a9c5509718b31db79fc2f9f121d24797f6f9e"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.534847 5055 generic.go:334] "Generic (PLEG): container finished" podID="5b434027-d1f5-464b-9d67-97324f18ab07" containerID="3583fcd259a133f88ef08fa8d636ca50304399fd45b1fa0f1cd8d830b5ac3015" exitCode=0 Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.534894 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" event={"ID":"5b434027-d1f5-464b-9d67-97324f18ab07","Type":"ContainerDied","Data":"3583fcd259a133f88ef08fa8d636ca50304399fd45b1fa0f1cd8d830b5ac3015"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.534921 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" event={"ID":"5b434027-d1f5-464b-9d67-97324f18ab07","Type":"ContainerStarted","Data":"86d19ebf9c91d143d4690ce2b31436511ac8bf8e796d5249a8af0eaf8e6dba82"} Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.612494 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-56f4bfc5b4-slbvq" podStartSLOduration=2.612470864 podStartE2EDuration="2.612470864s" podCreationTimestamp="2025-10-11 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:02.589847004 +0000 UTC m=+1286.364120821" watchObservedRunningTime="2025-10-11 07:15:02.612470864 +0000 UTC m=+1286.386744671" Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.901707 5055 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/barbican-api-56c5d58b4-jhj26" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.153:9311/healthcheck\": read tcp 10.217.0.2:51640->10.217.0.153:9311: read: connection reset by peer" Oct 11 07:15:02 crc kubenswrapper[5055]: I1011 07:15:02.901739 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-56c5d58b4-jhj26" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.153:9311/healthcheck\": read tcp 10.217.0.2:51634->10.217.0.153:9311: read: connection reset by peer" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.046045 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" path="/var/lib/kubelet/pods/c46cb7e1-610f-4887-8d0e-52d085b0d7f0/volumes" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.268153 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.556905 5055 generic.go:334] "Generic (PLEG): container finished" podID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerID="c1d684263aaf87efbcea506b560a28b1b946789bf372a768d36acec5fea4d022" exitCode=0 Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.557338 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerDied","Data":"c1d684263aaf87efbcea506b560a28b1b946789bf372a768d36acec5fea4d022"} Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.557400 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56c5d58b4-jhj26" event={"ID":"54c2e818-4fd5-44b3-9bcc-e08b813f83bd","Type":"ContainerDied","Data":"9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b"} Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.557421 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bcf83e8c6631231b2a44702ca4f167828a9020d649e74e31cc4688aa8c99c3b" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.560253 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" event={"ID":"5b434027-d1f5-464b-9d67-97324f18ab07","Type":"ContainerStarted","Data":"92342d3d4da526bf5a408a8a7a62bea0a488b2c2f6f747c73cacee6dc9fb09d9"} Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.560584 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.580604 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" podStartSLOduration=3.58058588 podStartE2EDuration="3.58058588s" podCreationTimestamp="2025-10-11 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:03.577719039 +0000 UTC m=+1287.351992846" watchObservedRunningTime="2025-10-11 07:15:03.58058588 +0000 UTC m=+1287.354859687" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.663700 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.762833 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle\") pod \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.762995 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom\") pod \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.763097 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs\") pod \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.763131 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data\") pod \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.763188 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9f42\" (UniqueName: \"kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42\") pod \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\" (UID: \"54c2e818-4fd5-44b3-9bcc-e08b813f83bd\") " Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.769122 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs" (OuterVolumeSpecName: "logs") pod "54c2e818-4fd5-44b3-9bcc-e08b813f83bd" (UID: "54c2e818-4fd5-44b3-9bcc-e08b813f83bd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.772343 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "54c2e818-4fd5-44b3-9bcc-e08b813f83bd" (UID: "54c2e818-4fd5-44b3-9bcc-e08b813f83bd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.772436 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42" (OuterVolumeSpecName: "kube-api-access-z9f42") pod "54c2e818-4fd5-44b3-9bcc-e08b813f83bd" (UID: "54c2e818-4fd5-44b3-9bcc-e08b813f83bd"). InnerVolumeSpecName "kube-api-access-z9f42". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.831327 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54c2e818-4fd5-44b3-9bcc-e08b813f83bd" (UID: "54c2e818-4fd5-44b3-9bcc-e08b813f83bd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.868347 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.868387 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.868400 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.868413 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9f42\" (UniqueName: \"kubernetes.io/projected/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-kube-api-access-z9f42\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.888968 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data" (OuterVolumeSpecName: "config-data") pod "54c2e818-4fd5-44b3-9bcc-e08b813f83bd" (UID: "54c2e818-4fd5-44b3-9bcc-e08b813f83bd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.969698 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54c2e818-4fd5-44b3-9bcc-e08b813f83bd-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:03 crc kubenswrapper[5055]: I1011 07:15:03.972161 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.070473 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmt25\" (UniqueName: \"kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25\") pod \"0a67a7ab-47f4-4dc8-a49e-249233f62627\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.070586 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume\") pod \"0a67a7ab-47f4-4dc8-a49e-249233f62627\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.070702 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume\") pod \"0a67a7ab-47f4-4dc8-a49e-249233f62627\" (UID: \"0a67a7ab-47f4-4dc8-a49e-249233f62627\") " Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.080015 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25" (OuterVolumeSpecName: "kube-api-access-rmt25") pod "0a67a7ab-47f4-4dc8-a49e-249233f62627" (UID: "0a67a7ab-47f4-4dc8-a49e-249233f62627"). InnerVolumeSpecName "kube-api-access-rmt25". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.083301 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume" (OuterVolumeSpecName: "config-volume") pod "0a67a7ab-47f4-4dc8-a49e-249233f62627" (UID: "0a67a7ab-47f4-4dc8-a49e-249233f62627"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.083803 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0a67a7ab-47f4-4dc8-a49e-249233f62627" (UID: "0a67a7ab-47f4-4dc8-a49e-249233f62627"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.172468 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0a67a7ab-47f4-4dc8-a49e-249233f62627-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.172506 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0a67a7ab-47f4-4dc8-a49e-249233f62627-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.172520 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmt25\" (UniqueName: \"kubernetes.io/projected/0a67a7ab-47f4-4dc8-a49e-249233f62627-kube-api-access-rmt25\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.568595 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hjxfj" event={"ID":"6ba129d1-5c6b-41a7-9a7f-d469ad919b75","Type":"ContainerStarted","Data":"a36e56b7fca8b4f65e5fb319758ecc354a7e91d67c137d241820472741a23fe5"} Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.570343 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" event={"ID":"0a67a7ab-47f4-4dc8-a49e-249233f62627","Type":"ContainerDied","Data":"9c3ffdf8041718fa0951068b5774b418568a6ac1f9284aae8f16b8a013c27d08"} Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.570383 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c3ffdf8041718fa0951068b5774b418568a6ac1f9284aae8f16b8a013c27d08" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.570385 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.570365 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56c5d58b4-jhj26" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.587670 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-hjxfj" podStartSLOduration=4.332083076 podStartE2EDuration="37.587649497s" podCreationTimestamp="2025-10-11 07:14:27 +0000 UTC" firstStartedPulling="2025-10-11 07:14:30.263153139 +0000 UTC m=+1254.037426946" lastFinishedPulling="2025-10-11 07:15:03.51871956 +0000 UTC m=+1287.292993367" observedRunningTime="2025-10-11 07:15:04.587625367 +0000 UTC m=+1288.361899174" watchObservedRunningTime="2025-10-11 07:15:04.587649497 +0000 UTC m=+1288.361923304" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.621371 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.634431 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-56c5d58b4-jhj26"] Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.697353 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:15:04 crc kubenswrapper[5055]: E1011 07:15:04.697851 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.697877 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api" Oct 11 07:15:04 crc kubenswrapper[5055]: E1011 07:15:04.697896 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api-log" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.697906 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api-log" Oct 11 07:15:04 crc kubenswrapper[5055]: E1011 07:15:04.697933 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="dnsmasq-dns" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.697941 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="dnsmasq-dns" Oct 11 07:15:04 crc kubenswrapper[5055]: E1011 07:15:04.697974 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="init" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.697992 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="init" Oct 11 07:15:04 crc kubenswrapper[5055]: E1011 07:15:04.698003 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a67a7ab-47f4-4dc8-a49e-249233f62627" containerName="collect-profiles" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.698012 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a67a7ab-47f4-4dc8-a49e-249233f62627" containerName="collect-profiles" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.698239 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46cb7e1-610f-4887-8d0e-52d085b0d7f0" containerName="dnsmasq-dns" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.698261 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a67a7ab-47f4-4dc8-a49e-249233f62627" containerName="collect-profiles" Oct 11 07:15:04 crc 
kubenswrapper[5055]: I1011 07:15:04.698285 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api-log" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.698300 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" containerName="barbican-api" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.699465 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.702061 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.705194 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.706657 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.782426 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.782473 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.782512 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4klkm\" (UniqueName: \"kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.784049 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.784137 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.784236 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.784416 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885734 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885802 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885834 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885871 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885905 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885925 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.885960 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4klkm\" (UniqueName: \"kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.891867 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.891882 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.892919 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.893164 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.894366 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.896358 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:04 crc kubenswrapper[5055]: I1011 07:15:04.905471 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4klkm\" (UniqueName: \"kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm\") pod \"neutron-58d4f6c497-jm75b\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") " pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.005533 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c2e818-4fd5-44b3-9bcc-e08b813f83bd" path="/var/lib/kubelet/pods/54c2e818-4fd5-44b3-9bcc-e08b813f83bd/volumes" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.028776 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.308384 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.309748 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.311650 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-56ccf" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.311813 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.311849 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.321731 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.394431 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.394726 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.394774 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.394853 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49cgx\" (UniqueName: \"kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.495945 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49cgx\" (UniqueName: \"kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.496028 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.496116 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.496260 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.497207 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.501189 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.508372 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.522330 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49cgx\" (UniqueName: \"kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx\") pod \"openstackclient\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " pod="openstack/openstackclient" Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.561424 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:15:05 crc kubenswrapper[5055]: W1011 07:15:05.563550 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9b61d74_f126_4515_ba61_151f8ec0b48c.slice/crio-8a8ce8c7f50736ad08de9413b899006dfaf779888ac23790547c3cce4f8819e5 WatchSource:0}: Error finding container 8a8ce8c7f50736ad08de9413b899006dfaf779888ac23790547c3cce4f8819e5: Status 404 returned error can't find the container with id 8a8ce8c7f50736ad08de9413b899006dfaf779888ac23790547c3cce4f8819e5 Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.580063 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerStarted","Data":"8a8ce8c7f50736ad08de9413b899006dfaf779888ac23790547c3cce4f8819e5"} Oct 11 07:15:05 crc kubenswrapper[5055]: I1011 07:15:05.665146 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.106111 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.588914 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"491a98fd-dad0-4515-a217-c01bd4aa741d","Type":"ContainerStarted","Data":"3527254f7e364cace5325ef03ad1de60b3ecbe1aa90683d4abb0c3ac7d0dde76"} Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.590286 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerStarted","Data":"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c"} Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.590310 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerStarted","Data":"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265"} Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.590417 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:15:06 crc kubenswrapper[5055]: I1011 07:15:06.612829 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-58d4f6c497-jm75b" podStartSLOduration=2.612813836 podStartE2EDuration="2.612813836s" podCreationTimestamp="2025-10-11 07:15:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:06.607362022 +0000 UTC m=+1290.381635829" watchObservedRunningTime="2025-10-11 07:15:06.612813836 +0000 UTC m=+1290.387087643" Oct 11 07:15:09 crc kubenswrapper[5055]: I1011 07:15:09.617351 5055 generic.go:334] "Generic (PLEG): container finished" podID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" containerID="a36e56b7fca8b4f65e5fb319758ecc354a7e91d67c137d241820472741a23fe5" exitCode=0 Oct 11 07:15:09 crc kubenswrapper[5055]: I1011 07:15:09.617840 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hjxfj" event={"ID":"6ba129d1-5c6b-41a7-9a7f-d469ad919b75","Type":"ContainerDied","Data":"a36e56b7fca8b4f65e5fb319758ecc354a7e91d67c137d241820472741a23fe5"} Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.128418 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.179291 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.179539 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="dnsmasq-dns" containerID="cri-o://5049badca7435a38a8ff7e28892d2b03b19c39cd0a2e43bf3dd847a448880928" gracePeriod=10 Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.424438 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.436939 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.438887 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.438942 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.439482 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.453083 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530163 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530245 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530287 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530312 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530378 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530397 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c79fj\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530414 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " 
pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.530463 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.634663 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635605 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635663 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635692 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c79fj\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635720 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635796 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635847 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.635903 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 
07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.640288 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.645398 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.652109 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.653486 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.654643 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.655318 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.658311 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.665291 5055 generic.go:334] "Generic (PLEG): container finished" podID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerID="5049badca7435a38a8ff7e28892d2b03b19c39cd0a2e43bf3dd847a448880928" exitCode=0 Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.665331 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" event={"ID":"02b7eb87-002d-454c-bc42-efe7ffdd18e3","Type":"ContainerDied","Data":"5049badca7435a38a8ff7e28892d2b03b19c39cd0a2e43bf3dd847a448880928"} Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.666259 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c79fj\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj\") pod \"swift-proxy-6c8466445c-jqscq\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " 
pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:11 crc kubenswrapper[5055]: I1011 07:15:11.756602 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.727324 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hjxfj" event={"ID":"6ba129d1-5c6b-41a7-9a7f-d469ad919b75","Type":"ContainerDied","Data":"766f5d06bb7d9601c08fc7a0fe49a4a5caa6e2d23b469cd2ef36b1658074922a"} Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.728104 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="766f5d06bb7d9601c08fc7a0fe49a4a5caa6e2d23b469cd2ef36b1658074922a" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.773544 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896689 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896809 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896846 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f5t9\" (UniqueName: \"kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896868 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896940 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.896966 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data\") pod \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\" (UID: \"6ba129d1-5c6b-41a7-9a7f-d469ad919b75\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.899465 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.904452 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9" (OuterVolumeSpecName: "kube-api-access-5f5t9") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "kube-api-access-5f5t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.905230 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.908579 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts" (OuterVolumeSpecName: "scripts") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.926368 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.941381 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.976221 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data" (OuterVolumeSpecName: "config-data") pod "6ba129d1-5c6b-41a7-9a7f-d469ad919b75" (UID: "6ba129d1-5c6b-41a7-9a7f-d469ad919b75"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998086 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998186 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998216 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998361 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998385 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998406 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jxnh\" (UniqueName: \"kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh\") pod \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\" (UID: \"02b7eb87-002d-454c-bc42-efe7ffdd18e3\") " Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998799 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998822 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998833 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f5t9\" (UniqueName: \"kubernetes.io/projected/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-kube-api-access-5f5t9\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998843 5055 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 07:15:14.998851 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:14 crc kubenswrapper[5055]: I1011 
07:15:14.998860 5055 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ba129d1-5c6b-41a7-9a7f-d469ad919b75-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.002182 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh" (OuterVolumeSpecName: "kube-api-access-6jxnh") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "kube-api-access-6jxnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.048894 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.049068 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.056450 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config" (OuterVolumeSpecName: "config") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.056930 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.063370 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "02b7eb87-002d-454c-bc42-efe7ffdd18e3" (UID: "02b7eb87-002d-454c-bc42-efe7ffdd18e3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100358 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100392 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100403 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jxnh\" (UniqueName: \"kubernetes.io/projected/02b7eb87-002d-454c-bc42-efe7ffdd18e3-kube-api-access-6jxnh\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100414 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100422 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.100433 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02b7eb87-002d-454c-bc42-efe7ffdd18e3-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.226690 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.740095 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.740102 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc68bd5-9z7rv" event={"ID":"02b7eb87-002d-454c-bc42-efe7ffdd18e3","Type":"ContainerDied","Data":"c1e172df710a98505e8e29e5a20bcac5e7f0f26232439c8ccc35c17d7027e9eb"} Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.740489 5055 scope.go:117] "RemoveContainer" containerID="5049badca7435a38a8ff7e28892d2b03b19c39cd0a2e43bf3dd847a448880928" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.742587 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"491a98fd-dad0-4515-a217-c01bd4aa741d","Type":"ContainerStarted","Data":"717aceea1665af2d47ebfdceb4d8f53a94f23eb3af20babe5546bf58c47b4764"} Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.747004 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-hjxfj" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.747564 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerStarted","Data":"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1"} Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.747603 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerStarted","Data":"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159"} Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.747615 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerStarted","Data":"85f4eca87ee2e65bc1774d422d6ae86c472d1e300f216c966d8229e39cf0c48c"} Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.747721 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.772289 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.2294907 podStartE2EDuration="10.772268587s" podCreationTimestamp="2025-10-11 07:15:05 +0000 UTC" firstStartedPulling="2025-10-11 07:15:06.118059961 +0000 UTC m=+1289.892333768" lastFinishedPulling="2025-10-11 07:15:14.660837848 +0000 UTC m=+1298.435111655" observedRunningTime="2025-10-11 07:15:15.770701873 +0000 UTC m=+1299.544975700" watchObservedRunningTime="2025-10-11 07:15:15.772268587 +0000 UTC m=+1299.546542394" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.779929 5055 scope.go:117] "RemoveContainer" containerID="5e5268369fdabdd2617d30772fabc77e48b8e70f7fdfb630347d834889fb431d" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.824304 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6c8466445c-jqscq" podStartSLOduration=4.824285628 podStartE2EDuration="4.824285628s" podCreationTimestamp="2025-10-11 07:15:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:15.812079023 +0000 UTC m=+1299.586352840" watchObservedRunningTime="2025-10-11 07:15:15.824285628 +0000 UTC m=+1299.598559435" Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.851988 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:15:15 crc kubenswrapper[5055]: I1011 07:15:15.869349 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dc68bd5-9z7rv"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.163388 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:16 crc kubenswrapper[5055]: E1011 07:15:16.164231 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="init" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.164299 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="init" Oct 11 07:15:16 crc kubenswrapper[5055]: E1011 07:15:16.164360 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" containerName="cinder-db-sync" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.164411 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" containerName="cinder-db-sync" Oct 11 07:15:16 crc kubenswrapper[5055]: E1011 07:15:16.164471 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="dnsmasq-dns" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.164524 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="dnsmasq-dns" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.164752 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" containerName="dnsmasq-dns" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.164875 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" containerName="cinder-db-sync" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.165839 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.177372 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.177874 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.178100 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-b8f8l" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.178283 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.198582 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.230689 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.231099 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.231162 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.231185 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " 
pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.231251 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhjzv\" (UniqueName: \"kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.231294 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.283566 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.284989 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.297799 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333464 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333549 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333571 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333592 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4rdc\" (UniqueName: \"kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333615 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333641 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhjzv\" (UniqueName: 
\"kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333669 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333693 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333744 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333783 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333817 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.333851 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.334165 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.344540 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.345236 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.352497 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.363225 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhjzv\" (UniqueName: \"kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.369724 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts\") pod \"cinder-scheduler-0\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435328 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435459 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435492 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435521 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4rdc\" (UniqueName: \"kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435546 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.435624 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.436642 5055 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.436897 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.437618 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.438398 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.438416 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.462562 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4rdc\" (UniqueName: \"kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc\") pod \"dnsmasq-dns-84bd785c49-64j5m\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") " pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.479052 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.480712 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.481879 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.486202 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.488298 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541252 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541327 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541361 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541483 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541526 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.541575 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cf46\" (UniqueName: \"kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.615147 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642688 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642737 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cf46\" (UniqueName: \"kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642889 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642912 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642932 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642967 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.642985 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.644160 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.644232 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.651518 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " 
pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.651626 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.651746 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.652593 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.667730 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cf46\" (UniqueName: \"kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46\") pod \"cinder-api-0\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.759859 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.828479 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.935216 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.935515 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-central-agent" containerID="cri-o://544482de6387283242f4fb3c7aced088405f9691250b6885a7b699ebd124781e" gracePeriod=30 Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.936044 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-notification-agent" containerID="cri-o://50c8bf8be092aa5575e850de8cf541ccd2504f5ef84cb40d39f711bf8f67f741" gracePeriod=30 Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.936053 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="proxy-httpd" containerID="cri-o://2dfc44369f1cc20d8ffeb963d8c4f3181d8d04f6e640f9ae10071650e167e566" gracePeriod=30 Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.936175 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="sg-core" containerID="cri-o://4d2f8714dd7ca63d6ed1f16a977f4eeb14d2017a21fd69bd2909d022949a7e3e" gracePeriod=30 Oct 11 07:15:16 crc kubenswrapper[5055]: I1011 07:15:16.945398 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 
07:15:17.010207 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02b7eb87-002d-454c-bc42-efe7ffdd18e3" path="/var/lib/kubelet/pods/02b7eb87-002d-454c-bc42-efe7ffdd18e3/volumes" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.050446 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.165234 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.354739 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.762327 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.764385 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-log" containerID="cri-o://1a9dd45ee3e83283bc4c67687d9a75523999a033d2bab86f134ae42061e8bef0" gracePeriod=30 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.764459 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-httpd" containerID="cri-o://cb16ae1b9b332802488fc88c04afb8c3ed39071dd545eb4b99255d83d4e3c16a" gracePeriod=30 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.795672 5055 generic.go:334] "Generic (PLEG): container finished" podID="c13d308b-a780-44eb-8574-0590dc8316a8" containerID="fff0f158566c1b8c2fe1c4a0cf07e68ef221a4c5c94e05c7eb576b98e99d98af" exitCode=0 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.796052 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" event={"ID":"c13d308b-a780-44eb-8574-0590dc8316a8","Type":"ContainerDied","Data":"fff0f158566c1b8c2fe1c4a0cf07e68ef221a4c5c94e05c7eb576b98e99d98af"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.796088 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" event={"ID":"c13d308b-a780-44eb-8574-0590dc8316a8","Type":"ContainerStarted","Data":"483b092da9ad9cba31f808b1254ee2f56bb07dfc9273226ed65c44250a5f8331"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.800141 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerStarted","Data":"078437a74396bef91c516e8a346ae6375486c138fc7191914271e30778a11980"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.805394 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-stvkx"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.807554 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.810428 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerStarted","Data":"8f91f09e023567998ac660e9f014de3b252e4b0d1bfce336d9dc62a7152a4bb6"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.865482 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-stvkx"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.872975 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5qwf\" (UniqueName: \"kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf\") pod \"nova-api-db-create-stvkx\" (UID: \"6899c71e-b2d8-45f6-bc9b-e37542b5f544\") " pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.940721 5055 generic.go:334] "Generic (PLEG): container finished" podID="7160286e-8686-49e8-850f-9bdc283a4a50" containerID="2dfc44369f1cc20d8ffeb963d8c4f3181d8d04f6e640f9ae10071650e167e566" exitCode=0 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.941094 5055 generic.go:334] "Generic (PLEG): container finished" podID="7160286e-8686-49e8-850f-9bdc283a4a50" containerID="4d2f8714dd7ca63d6ed1f16a977f4eeb14d2017a21fd69bd2909d022949a7e3e" exitCode=2 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.941111 5055 generic.go:334] "Generic (PLEG): container finished" podID="7160286e-8686-49e8-850f-9bdc283a4a50" containerID="544482de6387283242f4fb3c7aced088405f9691250b6885a7b699ebd124781e" exitCode=0 Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.940756 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerDied","Data":"2dfc44369f1cc20d8ffeb963d8c4f3181d8d04f6e640f9ae10071650e167e566"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.941172 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerDied","Data":"4d2f8714dd7ca63d6ed1f16a977f4eeb14d2017a21fd69bd2909d022949a7e3e"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.941189 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerDied","Data":"544482de6387283242f4fb3c7aced088405f9691250b6885a7b699ebd124781e"} Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.972829 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-4ccpj"] Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.974097 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.981923 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5qwf\" (UniqueName: \"kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf\") pod \"nova-api-db-create-stvkx\" (UID: \"6899c71e-b2d8-45f6-bc9b-e37542b5f544\") " pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:17 crc kubenswrapper[5055]: I1011 07:15:17.997153 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-4ccpj"] Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.010085 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5qwf\" (UniqueName: \"kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf\") pod \"nova-api-db-create-stvkx\" (UID: \"6899c71e-b2d8-45f6-bc9b-e37542b5f544\") " pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.015324 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.087593 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbshh\" (UniqueName: \"kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh\") pod \"nova-cell0-db-create-4ccpj\" (UID: \"1103eafd-fb3f-472a-8a90-2733ef956fe2\") " pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.110250 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-h7jx7"] Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.111800 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.125643 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h7jx7"] Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.191001 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q68gg\" (UniqueName: \"kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg\") pod \"nova-cell1-db-create-h7jx7\" (UID: \"472e70e9-3fa9-4f4d-a1ae-646e04c485f5\") " pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.191055 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbshh\" (UniqueName: \"kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh\") pod \"nova-cell0-db-create-4ccpj\" (UID: \"1103eafd-fb3f-472a-8a90-2733ef956fe2\") " pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.210460 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbshh\" (UniqueName: \"kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh\") pod \"nova-cell0-db-create-4ccpj\" (UID: \"1103eafd-fb3f-472a-8a90-2733ef956fe2\") " pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.292693 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q68gg\" (UniqueName: \"kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg\") pod \"nova-cell1-db-create-h7jx7\" (UID: \"472e70e9-3fa9-4f4d-a1ae-646e04c485f5\") " pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.313812 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q68gg\" (UniqueName: \"kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg\") pod \"nova-cell1-db-create-h7jx7\" (UID: \"472e70e9-3fa9-4f4d-a1ae-646e04c485f5\") " pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.325887 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.437354 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.627299 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-stvkx"] Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.795355 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-4ccpj"] Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.953972 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerStarted","Data":"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.957309 5055 generic.go:334] "Generic (PLEG): container finished" podID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerID="1a9dd45ee3e83283bc4c67687d9a75523999a033d2bab86f134ae42061e8bef0" exitCode=143 Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.957365 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerDied","Data":"1a9dd45ee3e83283bc4c67687d9a75523999a033d2bab86f134ae42061e8bef0"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.958832 5055 generic.go:334] "Generic (PLEG): container finished" podID="6899c71e-b2d8-45f6-bc9b-e37542b5f544" containerID="f6898264fb5556b3944d3a8689d11d1d7edbf1e2e1599308646deff27daa9023" exitCode=0 Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.958891 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-stvkx" event={"ID":"6899c71e-b2d8-45f6-bc9b-e37542b5f544","Type":"ContainerDied","Data":"f6898264fb5556b3944d3a8689d11d1d7edbf1e2e1599308646deff27daa9023"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.958909 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-stvkx" event={"ID":"6899c71e-b2d8-45f6-bc9b-e37542b5f544","Type":"ContainerStarted","Data":"3bf8a6c1e9bce24255c4652211ba7fd7e5e98818a554d189c756f54f4f7e1b41"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.960909 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4ccpj" event={"ID":"1103eafd-fb3f-472a-8a90-2733ef956fe2","Type":"ContainerStarted","Data":"6659d0c4461df2e6bc032d6b24c81deed89aa14959a5edbd27354261be8985f6"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.962665 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" event={"ID":"c13d308b-a780-44eb-8574-0590dc8316a8","Type":"ContainerStarted","Data":"aac783028ec9596d55d8bef35fe4aa03e5b126d8adc851cd8aea1aa6f04ad467"} Oct 11 07:15:18 crc kubenswrapper[5055]: I1011 07:15:18.963447 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.019782 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" podStartSLOduration=3.019741649 podStartE2EDuration="3.019741649s" podCreationTimestamp="2025-10-11 07:15:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:18.99713192 +0000 UTC m=+1302.771405737" watchObservedRunningTime="2025-10-11 07:15:19.019741649 +0000 UTC m=+1302.794015456" Oct 11 07:15:19 crc kubenswrapper[5055]: 
I1011 07:15:19.037110 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-h7jx7"] Oct 11 07:15:19 crc kubenswrapper[5055]: W1011 07:15:19.051495 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod472e70e9_3fa9_4f4d_a1ae_646e04c485f5.slice/crio-696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10 WatchSource:0}: Error finding container 696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10: Status 404 returned error can't find the container with id 696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10 Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.687483 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.975821 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerStarted","Data":"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805"} Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.976188 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerStarted","Data":"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4"} Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.977368 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerStarted","Data":"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db"} Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.977474 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.978765 5055 generic.go:334] "Generic (PLEG): container finished" podID="1103eafd-fb3f-472a-8a90-2733ef956fe2" containerID="608490c678382fe22f1f2f0a325b741a076db7f6aa12df1a4bfcf555d6b2291b" exitCode=0 Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.978856 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4ccpj" event={"ID":"1103eafd-fb3f-472a-8a90-2733ef956fe2","Type":"ContainerDied","Data":"608490c678382fe22f1f2f0a325b741a076db7f6aa12df1a4bfcf555d6b2291b"} Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.979924 5055 generic.go:334] "Generic (PLEG): container finished" podID="472e70e9-3fa9-4f4d-a1ae-646e04c485f5" containerID="7065c574492a21e6fdcf4a81ea128b10e7f9f4f28e6b62ea66aa04c7dd1957bb" exitCode=0 Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.979977 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h7jx7" event={"ID":"472e70e9-3fa9-4f4d-a1ae-646e04c485f5","Type":"ContainerDied","Data":"7065c574492a21e6fdcf4a81ea128b10e7f9f4f28e6b62ea66aa04c7dd1957bb"} Oct 11 07:15:19 crc kubenswrapper[5055]: I1011 07:15:19.980081 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h7jx7" event={"ID":"472e70e9-3fa9-4f4d-a1ae-646e04c485f5","Type":"ContainerStarted","Data":"696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10"} Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.013046 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.910974864 
podStartE2EDuration="4.013019737s" podCreationTimestamp="2025-10-11 07:15:16 +0000 UTC" firstStartedPulling="2025-10-11 07:15:17.072074263 +0000 UTC m=+1300.846348070" lastFinishedPulling="2025-10-11 07:15:18.174119136 +0000 UTC m=+1301.948392943" observedRunningTime="2025-10-11 07:15:19.999865255 +0000 UTC m=+1303.774139072" watchObservedRunningTime="2025-10-11 07:15:20.013019737 +0000 UTC m=+1303.787293544" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.053649 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.053629146 podStartE2EDuration="4.053629146s" podCreationTimestamp="2025-10-11 07:15:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:20.043857019 +0000 UTC m=+1303.818130836" watchObservedRunningTime="2025-10-11 07:15:20.053629146 +0000 UTC m=+1303.827902953" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.229669 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.230355 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-log" containerID="cri-o://c9b70bda0cf162db2afe12d3f3dd8a8cb6c6497d562f02b62d86cb61ba89eac8" gracePeriod=30 Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.230879 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-httpd" containerID="cri-o://d5026e4621b733cfc25e44d6134706b5fa3663e90b2437e596f40565201fe479" gracePeriod=30 Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.445040 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.543257 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5qwf\" (UniqueName: \"kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf\") pod \"6899c71e-b2d8-45f6-bc9b-e37542b5f544\" (UID: \"6899c71e-b2d8-45f6-bc9b-e37542b5f544\") " Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.550229 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf" (OuterVolumeSpecName: "kube-api-access-r5qwf") pod "6899c71e-b2d8-45f6-bc9b-e37542b5f544" (UID: "6899c71e-b2d8-45f6-bc9b-e37542b5f544"). InnerVolumeSpecName "kube-api-access-r5qwf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.645769 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5qwf\" (UniqueName: \"kubernetes.io/projected/6899c71e-b2d8-45f6-bc9b-e37542b5f544-kube-api-access-r5qwf\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.992431 5055 generic.go:334] "Generic (PLEG): container finished" podID="cc75f86e-fb96-4f2d-a852-66912a678430" containerID="c9b70bda0cf162db2afe12d3f3dd8a8cb6c6497d562f02b62d86cb61ba89eac8" exitCode=143 Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.992541 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerDied","Data":"c9b70bda0cf162db2afe12d3f3dd8a8cb6c6497d562f02b62d86cb61ba89eac8"} Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.996900 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-stvkx" Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.997121 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api-log" containerID="cri-o://b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" gracePeriod=30 Oct 11 07:15:20 crc kubenswrapper[5055]: I1011 07:15:20.997309 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api" containerID="cri-o://ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" gracePeriod=30 Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.007248 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-stvkx" event={"ID":"6899c71e-b2d8-45f6-bc9b-e37542b5f544","Type":"ContainerDied","Data":"3bf8a6c1e9bce24255c4652211ba7fd7e5e98818a554d189c756f54f4f7e1b41"} Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.007293 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bf8a6c1e9bce24255c4652211ba7fd7e5e98818a554d189c756f54f4f7e1b41" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.203524 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.143:9292/healthcheck\": read tcp 10.217.0.2:34802->10.217.0.143:9292: read: connection reset by peer" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.203603 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.143:9292/healthcheck\": read tcp 10.217.0.2:34796->10.217.0.143:9292: read: connection reset by peer" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.458101 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.517688 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.523106 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.565664 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbshh\" (UniqueName: \"kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh\") pod \"1103eafd-fb3f-472a-8a90-2733ef956fe2\" (UID: \"1103eafd-fb3f-472a-8a90-2733ef956fe2\") " Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.565869 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q68gg\" (UniqueName: \"kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg\") pod \"472e70e9-3fa9-4f4d-a1ae-646e04c485f5\" (UID: \"472e70e9-3fa9-4f4d-a1ae-646e04c485f5\") " Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.571069 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg" (OuterVolumeSpecName: "kube-api-access-q68gg") pod "472e70e9-3fa9-4f4d-a1ae-646e04c485f5" (UID: "472e70e9-3fa9-4f4d-a1ae-646e04c485f5"). InnerVolumeSpecName "kube-api-access-q68gg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.571289 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh" (OuterVolumeSpecName: "kube-api-access-pbshh") pod "1103eafd-fb3f-472a-8a90-2733ef956fe2" (UID: "1103eafd-fb3f-472a-8a90-2733ef956fe2"). InnerVolumeSpecName "kube-api-access-pbshh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.668609 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q68gg\" (UniqueName: \"kubernetes.io/projected/472e70e9-3fa9-4f4d-a1ae-646e04c485f5-kube-api-access-q68gg\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.668872 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbshh\" (UniqueName: \"kubernetes.io/projected/1103eafd-fb3f-472a-8a90-2733ef956fe2-kube-api-access-pbshh\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.770874 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.774086 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:15:21 crc kubenswrapper[5055]: I1011 07:15:21.839608 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.155:3000/\": dial tcp 10.217.0.155:3000: connect: connection refused" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.013489 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-4ccpj" event={"ID":"1103eafd-fb3f-472a-8a90-2733ef956fe2","Type":"ContainerDied","Data":"6659d0c4461df2e6bc032d6b24c81deed89aa14959a5edbd27354261be8985f6"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.013788 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6659d0c4461df2e6bc032d6b24c81deed89aa14959a5edbd27354261be8985f6" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.013849 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-4ccpj" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.018573 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-h7jx7" event={"ID":"472e70e9-3fa9-4f4d-a1ae-646e04c485f5","Type":"ContainerDied","Data":"696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.018638 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="696e0512b75a1d4bd1d07c5f354c70cda17704af6ba11559b09176a4631c2e10" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.018684 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-h7jx7" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.020652 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022698 5055 generic.go:334] "Generic (PLEG): container finished" podID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerID="ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" exitCode=0 Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022726 5055 generic.go:334] "Generic (PLEG): container finished" podID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerID="b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" exitCode=143 Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022821 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerDied","Data":"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022861 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerDied","Data":"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022885 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a420852d-3b6b-43b8-afda-f9e0ce57462d","Type":"ContainerDied","Data":"8f91f09e023567998ac660e9f014de3b252e4b0d1bfce336d9dc62a7152a4bb6"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.022900 5055 scope.go:117] "RemoveContainer" containerID="ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.029436 5055 generic.go:334] "Generic (PLEG): container finished" podID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerID="cb16ae1b9b332802488fc88c04afb8c3ed39071dd545eb4b99255d83d4e3c16a" exitCode=0 Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.029496 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerDied","Data":"cb16ae1b9b332802488fc88c04afb8c3ed39071dd545eb4b99255d83d4e3c16a"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.029521 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b8eee0b8-baa6-4cbb-a0ee-230b89adb073","Type":"ContainerDied","Data":"bdec8129aeaf64c9cca57c44845abb536e328d18fa6aeea9b86d28616395ddd1"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.029531 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bdec8129aeaf64c9cca57c44845abb536e328d18fa6aeea9b86d28616395ddd1" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.031248 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.035733 5055 generic.go:334] "Generic (PLEG): container finished" podID="7160286e-8686-49e8-850f-9bdc283a4a50" containerID="50c8bf8be092aa5575e850de8cf541ccd2504f5ef84cb40d39f711bf8f67f741" exitCode=0 Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.035934 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerDied","Data":"50c8bf8be092aa5575e850de8cf541ccd2504f5ef84cb40d39f711bf8f67f741"} Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.075647 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.075848 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.075955 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phpk5\" (UniqueName: \"kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076072 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076173 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076262 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076346 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076436 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076554 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076618 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076731 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076913 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cf46\" (UniqueName: \"kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.076988 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.077099 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run\") pod \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\" (UID: \"b8eee0b8-baa6-4cbb-a0ee-230b89adb073\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.077176 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data\") pod \"a420852d-3b6b-43b8-afda-f9e0ce57462d\" (UID: \"a420852d-3b6b-43b8-afda-f9e0ce57462d\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.078581 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.085287 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.085577 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs" (OuterVolumeSpecName: "logs") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.086175 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs" (OuterVolumeSpecName: "logs") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.086268 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.097918 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.103425 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46" (OuterVolumeSpecName: "kube-api-access-5cf46") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "kube-api-access-5cf46". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.104313 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5" (OuterVolumeSpecName: "kube-api-access-phpk5") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "kube-api-access-phpk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.108999 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts" (OuterVolumeSpecName: "scripts") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.111150 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts" (OuterVolumeSpecName: "scripts") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.114178 5055 scope.go:117] "RemoveContainer" containerID="b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.140485 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.150039 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.150090 5055 scope.go:117] "RemoveContainer" containerID="ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" Oct 11 07:15:22 crc kubenswrapper[5055]: E1011 07:15:22.153585 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db\": container with ID starting with ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db not found: ID does not exist" containerID="ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.153650 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db"} err="failed to get container status \"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db\": rpc error: code = NotFound desc = could not find container \"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db\": container with ID starting with ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db not found: ID does not exist" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.153698 5055 scope.go:117] "RemoveContainer" containerID="b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" Oct 11 07:15:22 crc kubenswrapper[5055]: E1011 07:15:22.154217 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069\": container with ID starting with b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069 not found: ID does not exist" containerID="b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.154266 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069"} err="failed to get container status \"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069\": rpc error: code = NotFound desc = could not find container \"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069\": container with ID starting with 
b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069 not found: ID does not exist" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.154281 5055 scope.go:117] "RemoveContainer" containerID="ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.157659 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db"} err="failed to get container status \"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db\": rpc error: code = NotFound desc = could not find container \"ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db\": container with ID starting with ea08fb2b67f5280a114a524161d25e00e0cd4de73f0cfc5038d62b110b1bd1db not found: ID does not exist" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.157695 5055 scope.go:117] "RemoveContainer" containerID="b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.158046 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069"} err="failed to get container status \"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069\": rpc error: code = NotFound desc = could not find container \"b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069\": container with ID starting with b29124e202c226a33d55f39c3e61fc587c994118eb54587af7ab4de3af104069 not found: ID does not exist" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.163386 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data" (OuterVolumeSpecName: "config-data") pod "a420852d-3b6b-43b8-afda-f9e0ce57462d" (UID: "a420852d-3b6b-43b8-afda-f9e0ce57462d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.163732 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179394 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179602 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179666 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179722 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cf46\" (UniqueName: \"kubernetes.io/projected/a420852d-3b6b-43b8-afda-f9e0ce57462d-kube-api-access-5cf46\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179811 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179874 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.179928 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180060 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180122 5055 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a420852d-3b6b-43b8-afda-f9e0ce57462d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180183 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phpk5\" (UniqueName: \"kubernetes.io/projected/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-kube-api-access-phpk5\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180247 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180300 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a420852d-3b6b-43b8-afda-f9e0ce57462d-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.180356 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 
07:15:22.180418 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a420852d-3b6b-43b8-afda-f9e0ce57462d-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.189958 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data" (OuterVolumeSpecName: "config-data") pod "b8eee0b8-baa6-4cbb-a0ee-230b89adb073" (UID: "b8eee0b8-baa6-4cbb-a0ee-230b89adb073"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.239616 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.282125 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8eee0b8-baa6-4cbb-a0ee-230b89adb073-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.282163 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.409421 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488030 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488119 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488183 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488226 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488273 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488322 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle\") pod 
\"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488374 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql87c\" (UniqueName: \"kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c\") pod \"7160286e-8686-49e8-850f-9bdc283a4a50\" (UID: \"7160286e-8686-49e8-850f-9bdc283a4a50\") " Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.488895 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.491471 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.494243 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts" (OuterVolumeSpecName: "scripts") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.494334 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c" (OuterVolumeSpecName: "kube-api-access-ql87c") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "kube-api-access-ql87c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.533214 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.590267 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.590311 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql87c\" (UniqueName: \"kubernetes.io/projected/7160286e-8686-49e8-850f-9bdc283a4a50-kube-api-access-ql87c\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.590328 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.590339 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.590352 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7160286e-8686-49e8-850f-9bdc283a4a50-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.613052 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.626384 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data" (OuterVolumeSpecName: "config-data") pod "7160286e-8686-49e8-850f-9bdc283a4a50" (UID: "7160286e-8686-49e8-850f-9bdc283a4a50"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.692140 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:22 crc kubenswrapper[5055]: I1011 07:15:22.692171 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7160286e-8686-49e8-850f-9bdc283a4a50-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.045585 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.048832 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.048875 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7160286e-8686-49e8-850f-9bdc283a4a50","Type":"ContainerDied","Data":"bbef988c8a62f3c6e4d85f950c06aad68deb511b896cfed32cb368904a01c90e"} Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.048915 5055 scope.go:117] "RemoveContainer" containerID="2dfc44369f1cc20d8ffeb963d8c4f3181d8d04f6e640f9ae10071650e167e566" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.048840 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.070980 5055 scope.go:117] "RemoveContainer" containerID="4d2f8714dd7ca63d6ed1f16a977f4eeb14d2017a21fd69bd2909d022949a7e3e" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.073402 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.080521 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.089550 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.096203 5055 scope.go:117] "RemoveContainer" containerID="50c8bf8be092aa5575e850de8cf541ccd2504f5ef84cb40d39f711bf8f67f741" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.107853 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.131586 5055 scope.go:117] "RemoveContainer" containerID="544482de6387283242f4fb3c7aced088405f9691250b6885a7b699ebd124781e" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.131735 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132191 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-notification-agent" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132209 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-notification-agent" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132222 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132228 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132251 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-log" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132256 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-log" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132266 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6899c71e-b2d8-45f6-bc9b-e37542b5f544" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132273 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6899c71e-b2d8-45f6-bc9b-e37542b5f544" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132292 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="sg-core" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132297 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="sg-core" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132309 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-central-agent" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132315 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-central-agent" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132322 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="proxy-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132327 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="proxy-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132339 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1103eafd-fb3f-472a-8a90-2733ef956fe2" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132344 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1103eafd-fb3f-472a-8a90-2733ef956fe2" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132352 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472e70e9-3fa9-4f4d-a1ae-646e04c485f5" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132357 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="472e70e9-3fa9-4f4d-a1ae-646e04c485f5" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132365 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132370 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api" Oct 11 07:15:23 crc kubenswrapper[5055]: E1011 07:15:23.132378 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api-log" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132384 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api-log" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132537 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132548 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="proxy-httpd" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132564 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6899c71e-b2d8-45f6-bc9b-e37542b5f544" containerName="mariadb-database-create" Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132574 
5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-central-agent"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132580 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132587 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" containerName="cinder-api-log"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132596 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="472e70e9-3fa9-4f4d-a1ae-646e04c485f5" containerName="mariadb-database-create"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132613 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="ceilometer-notification-agent"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132627 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" containerName="glance-log"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132638 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1103eafd-fb3f-472a-8a90-2733ef956fe2" containerName="mariadb-database-create"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.132650 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" containerName="sg-core"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.133591 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.136339 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.136459 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.139824 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.146955 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.157622 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.171286 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.173941 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.178350 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.178545 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.185482 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.197635 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.205837 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.208188 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.210139 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.213018 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.214428 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226371 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226428 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226468 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226505 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226537 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmjln\" (UniqueName: \"kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226582 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226607 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226637 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm9xq\" (UniqueName: \"kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226659 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226700 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226729 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226780 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226832 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226887 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.226917 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.227381 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328472 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328515 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328538 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328568 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328604 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328611 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328756 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328801 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328849 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328872 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328909 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328939 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bddg\" (UniqueName: \"kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328965 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.328987 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329008 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329032 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329239 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329460 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329603 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329623 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329690 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329732 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329822 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmjln\" (UniqueName: \"kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329891 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329920 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329951 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329977 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm9xq\" (UniqueName: \"kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.329997 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.333993 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.335703 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.336238 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.337421 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.338951 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.340559 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.343268 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.346593 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.346949 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.348894 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmjln\" (UniqueName: \"kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.349796 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm9xq\" (UniqueName: \"kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq\") pod \"ceilometer-0\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") " pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.349944 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data\") pod \"cinder-api-0\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431050 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431412 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431455 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431493 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431536 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431562 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.431597 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bddg\" (UniqueName: \"kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.432373 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.432497 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.432578 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.439371 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.439470 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.442580 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.444686 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.449822 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.452703 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bddg\" (UniqueName: \"kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.477252 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.492542 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.525446 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:23 crc kubenswrapper[5055]: I1011 07:15:23.937469 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Oct 11 07:15:23 crc kubenswrapper[5055]: W1011 07:15:23.942206 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8950328a_3275_4ba9_8bd8_ea4940f2eaac.slice/crio-f8000c8b2ce09867da6d5d6e824df4d623da61f2d0adbe03de44c59de1e0a250 WatchSource:0}: Error finding container f8000c8b2ce09867da6d5d6e824df4d623da61f2d0adbe03de44c59de1e0a250: Status 404 returned error can't find the container with id f8000c8b2ce09867da6d5d6e824df4d623da61f2d0adbe03de44c59de1e0a250
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.095805 5055 generic.go:334] "Generic (PLEG): container finished" podID="cc75f86e-fb96-4f2d-a852-66912a678430" containerID="d5026e4621b733cfc25e44d6134706b5fa3663e90b2437e596f40565201fe479" exitCode=0
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.095934 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerDied","Data":"d5026e4621b733cfc25e44d6134706b5fa3663e90b2437e596f40565201fe479"}
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.099200 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerStarted","Data":"f8000c8b2ce09867da6d5d6e824df4d623da61f2d0adbe03de44c59de1e0a250"}
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.103288 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.177368 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.205919 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353042 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353168 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353241 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353281 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353302 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353344 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2p22\" (UniqueName: \"kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353361 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353410 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run\") pod \"cc75f86e-fb96-4f2d-a852-66912a678430\" (UID: \"cc75f86e-fb96-4f2d-a852-66912a678430\") "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.353750 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs" (OuterVolumeSpecName: "logs") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.354043 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.359188 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22" (OuterVolumeSpecName: "kube-api-access-p2p22") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "kube-api-access-p2p22". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.359210 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts" (OuterVolumeSpecName: "scripts") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.362809 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.384020 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.419854 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.425636 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data" (OuterVolumeSpecName: "config-data") pod "cc75f86e-fb96-4f2d-a852-66912a678430" (UID: "cc75f86e-fb96-4f2d-a852-66912a678430"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455148 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-logs\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455189 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455201 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455236 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455246 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2p22\" (UniqueName: \"kubernetes.io/projected/cc75f86e-fb96-4f2d-a852-66912a678430-kube-api-access-p2p22\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455255 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455261 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cc75f86e-fb96-4f2d-a852-66912a678430-httpd-run\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.455269 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc75f86e-fb96-4f2d-a852-66912a678430-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.477629 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Oct 11 07:15:24 crc kubenswrapper[5055]: I1011 07:15:24.557210 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.016008 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7160286e-8686-49e8-850f-9bdc283a4a50" path="/var/lib/kubelet/pods/7160286e-8686-49e8-850f-9bdc283a4a50/volumes"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.017491 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a420852d-3b6b-43b8-afda-f9e0ce57462d" path="/var/lib/kubelet/pods/a420852d-3b6b-43b8-afda-f9e0ce57462d/volumes"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.019033 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8eee0b8-baa6-4cbb-a0ee-230b89adb073" path="/var/lib/kubelet/pods/b8eee0b8-baa6-4cbb-a0ee-230b89adb073/volumes"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.116303 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerStarted","Data":"7a317cbf9d96ff9a6155cc159a6fbf06531019a0858199108fc7c8dd6849cffd"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.117125 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerStarted","Data":"748f68b7167fd228ae1f8a3d1dbf65e50372ad53c78cfdfac197f63d954580d1"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.119484 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerStarted","Data":"fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.120672 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerStarted","Data":"e2f4befcbc237d4c2d0f0d68a3e7207cb887cd5c2506e76bffacbd7d4b7c9663"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.124267 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"cc75f86e-fb96-4f2d-a852-66912a678430","Type":"ContainerDied","Data":"ab3ec22e7b582a44411c57313be600437c73255d5832aae070c316fc9ef8e094"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.124302 5055 scope.go:117] "RemoveContainer" containerID="d5026e4621b733cfc25e44d6134706b5fa3663e90b2437e596f40565201fe479"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.124427 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.129051 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerStarted","Data":"3d5aad7ccd6e2d170e5b68480b570aef7d997fdd46672f0bca2c424d9434d752"}
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.156393 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.161941 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.173822 5055 scope.go:117] "RemoveContainer" containerID="c9b70bda0cf162db2afe12d3f3dd8a8cb6c6497d562f02b62d86cb61ba89eac8"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.200786 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 11 07:15:25 crc kubenswrapper[5055]: E1011 07:15:25.201349 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-log"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.201374 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-log"
Oct 11 07:15:25 crc kubenswrapper[5055]: E1011 07:15:25.201409 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-httpd"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.201418 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-httpd"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.201635 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-httpd"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.201661 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" containerName="glance-log"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.202658 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.207288 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.207615 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.212455 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375508 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375588 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375849 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375888 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375916 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375954 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwj4r\" (UniqueName: \"kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.375981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.376042 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.477831 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.477919 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.477987 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.478016 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.478044 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.478082 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwj4r\" (UniqueName: \"kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.478107 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.478526 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.480418 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.480795 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.480796 5055 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.485069 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.485379 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.489936 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.493353 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.501574 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwj4r\" (UniqueName: \"kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.520978 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:25 crc kubenswrapper[5055]: I1011 07:15:25.546215 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.142744 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerStarted","Data":"8445d4a2e1d0c3a546cd9368a1877165d4bf7e98c38114091de01f3e6ca9a695"}
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.146003 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerStarted","Data":"ce9f9d6bb4b08df1523515f638c8c0fd3d6d8d86faabc0808907511f9e7af1e3"}
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.146876 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.149957 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerStarted","Data":"40ecd3a7eac7c8c3f611bd5c2e7a1817a0e729c41438d03d2fb8ea9153aa6c9e"}
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.177388 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.177374362 podStartE2EDuration="3.177374362s" podCreationTimestamp="2025-10-11 07:15:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:26.175008305 +0000 UTC m=+1309.949282112" watchObservedRunningTime="2025-10-11 07:15:26.177374362 +0000 UTC m=+1309.951648169"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.209448 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.209422898 podStartE2EDuration="3.209422898s" podCreationTimestamp="2025-10-11 07:15:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:26.200170067 +0000 UTC m=+1309.974443874" watchObservedRunningTime="2025-10-11 07:15:26.209422898 +0000 UTC m=+1309.983696705"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.255907 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.616987 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84bd785c49-64j5m"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.695345 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"]
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.695603 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="dnsmasq-dns" containerID="cri-o://92342d3d4da526bf5a408a8a7a62bea0a488b2c2f6f747c73cacee6dc9fb09d9" gracePeriod=10
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.726833 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Oct 11 07:15:26 crc kubenswrapper[5055]: I1011 07:15:26.807263 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.030002 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc75f86e-fb96-4f2d-a852-66912a678430" path="/var/lib/kubelet/pods/cc75f86e-fb96-4f2d-a852-66912a678430/volumes"
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.172799 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerStarted","Data":"c1d53b317d7b7a6df70c5f5efdb8793f32b63b9f67d43aa9d015d200b5d8bac5"}
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.173070 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerStarted","Data":"8bd668e519389fb3424ea402a537236d71ec966542274730f7043c399964922a"}
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.182163 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerStarted","Data":"06a91c4df0c2b72701b2b929fef8ee3be13cbf20f6853cd958d8839cb3bff27b"}
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.187164 5055 generic.go:334] "Generic (PLEG): container finished" podID="5b434027-d1f5-464b-9d67-97324f18ab07" containerID="92342d3d4da526bf5a408a8a7a62bea0a488b2c2f6f747c73cacee6dc9fb09d9" exitCode=0
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.188017 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" event={"ID":"5b434027-d1f5-464b-9d67-97324f18ab07","Type":"ContainerDied","Data":"92342d3d4da526bf5a408a8a7a62bea0a488b2c2f6f747c73cacee6dc9fb09d9"}
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.188526 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="cinder-scheduler" containerID="cri-o://edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4" gracePeriod=30
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.188839 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="probe" containerID="cri-o://c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805" gracePeriod=30
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.201808 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7"
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325542 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d5nq\" (UniqueName: \"kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325605 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325658 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325680 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325719 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.325797 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0\") pod \"5b434027-d1f5-464b-9d67-97324f18ab07\" (UID: \"5b434027-d1f5-464b-9d67-97324f18ab07\") "
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.334070 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq" (OuterVolumeSpecName: "kube-api-access-6d5nq") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "kube-api-access-6d5nq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.383134 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.455319 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d5nq\" (UniqueName: \"kubernetes.io/projected/5b434027-d1f5-464b-9d67-97324f18ab07-kube-api-access-6d5nq\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.455355 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.455319 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config" (OuterVolumeSpecName: "config") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.455751 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.458277 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.458705 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5b434027-d1f5-464b-9d67-97324f18ab07" (UID: "5b434027-d1f5-464b-9d67-97324f18ab07"). InnerVolumeSpecName "dns-swift-storage-0".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.556709 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.557951 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.557973 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:27 crc kubenswrapper[5055]: I1011 07:15:27.557987 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5b434027-d1f5-464b-9d67-97324f18ab07-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.035068 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-9573-account-create-4kl4d"] Oct 11 07:15:28 crc kubenswrapper[5055]: E1011 07:15:28.035475 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="dnsmasq-dns" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.035490 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="dnsmasq-dns" Oct 11 07:15:28 crc kubenswrapper[5055]: E1011 07:15:28.035508 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="init" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.035514 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="init" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.035664 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" containerName="dnsmasq-dns" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.036213 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.038416 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.052563 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-9573-account-create-4kl4d"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.168326 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvrg6\" (UniqueName: \"kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6\") pod \"nova-api-9573-account-create-4kl4d\" (UID: \"9d4f4d49-efc9-44ef-a120-b8eda868d552\") " pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.199433 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerStarted","Data":"a11178f84b0ff5dc59df70eaf19881623d0805ce98d52ecaee0409582cc73140"} Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.200780 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.204239 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" event={"ID":"5b434027-d1f5-464b-9d67-97324f18ab07","Type":"ContainerDied","Data":"86d19ebf9c91d143d4690ce2b31436511ac8bf8e796d5249a8af0eaf8e6dba82"} Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.204286 5055 scope.go:117] "RemoveContainer" containerID="92342d3d4da526bf5a408a8a7a62bea0a488b2c2f6f747c73cacee6dc9fb09d9" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.204425 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c78787df7-gwqf7" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.210835 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerStarted","Data":"d7f9eac5a0e6f11b97625c4fa2e0f22db3b2a7d4736d79f64658b64c84d7459c"} Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.216900 5055 generic.go:334] "Generic (PLEG): container finished" podID="45e57763-c6df-4335-aecb-0a01fe415f15" containerID="c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805" exitCode=0 Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.216956 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerDied","Data":"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805"} Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.247122 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-372a-account-create-mzxwb"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.248750 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.253836 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.259031 5055 scope.go:117] "RemoveContainer" containerID="3583fcd259a133f88ef08fa8d636ca50304399fd45b1fa0f1cd8d830b5ac3015" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.262927 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.867527583 podStartE2EDuration="5.262903197s" podCreationTimestamp="2025-10-11 07:15:23 +0000 UTC" firstStartedPulling="2025-10-11 07:15:24.151083863 +0000 UTC m=+1307.925357670" lastFinishedPulling="2025-10-11 07:15:27.546459477 +0000 UTC m=+1311.320733284" observedRunningTime="2025-10-11 07:15:28.239012942 +0000 UTC m=+1312.013286749" watchObservedRunningTime="2025-10-11 07:15:28.262903197 +0000 UTC m=+1312.037177004" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.272103 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvrg6\" (UniqueName: \"kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6\") pod \"nova-api-9573-account-create-4kl4d\" (UID: \"9d4f4d49-efc9-44ef-a120-b8eda868d552\") " pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.272218 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2bg7\" (UniqueName: \"kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7\") pod \"nova-cell0-372a-account-create-mzxwb\" (UID: \"d0129105-ee1c-480f-a857-48e5cd08efd7\") " pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.289981 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-372a-account-create-mzxwb"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.294612 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.294592853 podStartE2EDuration="3.294592853s" podCreationTimestamp="2025-10-11 07:15:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:28.270892823 +0000 UTC m=+1312.045166630" watchObservedRunningTime="2025-10-11 07:15:28.294592853 +0000 UTC m=+1312.068866660" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.299697 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvrg6\" (UniqueName: \"kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6\") pod \"nova-api-9573-account-create-4kl4d\" (UID: \"9d4f4d49-efc9-44ef-a120-b8eda868d552\") " pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.328955 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.347070 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c78787df7-gwqf7"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.351227 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.374359 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2bg7\" (UniqueName: \"kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7\") pod \"nova-cell0-372a-account-create-mzxwb\" (UID: \"d0129105-ee1c-480f-a857-48e5cd08efd7\") " pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.394375 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2bg7\" (UniqueName: \"kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7\") pod \"nova-cell0-372a-account-create-mzxwb\" (UID: \"d0129105-ee1c-480f-a857-48e5cd08efd7\") " pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.436159 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-4c23-account-create-xgh7m"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.438661 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.448556 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.454422 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4c23-account-create-xgh7m"] Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.568297 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.581367 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hsh7\" (UniqueName: \"kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7\") pod \"nova-cell1-4c23-account-create-xgh7m\" (UID: \"59dafca2-dcd1-430c-8235-33df97f6cb43\") " pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.683307 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hsh7\" (UniqueName: \"kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7\") pod \"nova-cell1-4c23-account-create-xgh7m\" (UID: \"59dafca2-dcd1-430c-8235-33df97f6cb43\") " pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.744553 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hsh7\" (UniqueName: \"kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7\") pod \"nova-cell1-4c23-account-create-xgh7m\" (UID: \"59dafca2-dcd1-430c-8235-33df97f6cb43\") " pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.764086 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:28 crc kubenswrapper[5055]: I1011 07:15:28.875518 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-9573-account-create-4kl4d"] Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.017879 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b434027-d1f5-464b-9d67-97324f18ab07" path="/var/lib/kubelet/pods/5b434027-d1f5-464b-9d67-97324f18ab07/volumes" Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.180739 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-372a-account-create-mzxwb"] Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.255116 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-372a-account-create-mzxwb" event={"ID":"d0129105-ee1c-480f-a857-48e5cd08efd7","Type":"ContainerStarted","Data":"d7b1133762fcada2b0e9664c594305b101a2cc5a66bcaa191b8f80d7178695a4"} Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.261865 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9573-account-create-4kl4d" event={"ID":"9d4f4d49-efc9-44ef-a120-b8eda868d552","Type":"ContainerStarted","Data":"ce6c4a40a3a9d2f8beefe4abf12d5b0968a77827509a363909f19a741d4bd9d0"} Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.261901 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9573-account-create-4kl4d" event={"ID":"9d4f4d49-efc9-44ef-a120-b8eda868d552","Type":"ContainerStarted","Data":"059178d80cd32f5392205fdb9b0b6cd72ce01691a74630e57939d7ac3f7946f6"} Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.374479 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:29 crc kubenswrapper[5055]: I1011 07:15:29.427362 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4c23-account-create-xgh7m"] Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.273026 5055 generic.go:334] "Generic (PLEG): container finished" podID="d0129105-ee1c-480f-a857-48e5cd08efd7" containerID="e6301e1b39c4a53eec0da2358fd5cf00a686fed2690b06e72389b91e5a219d99" exitCode=0 Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.273108 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-372a-account-create-mzxwb" event={"ID":"d0129105-ee1c-480f-a857-48e5cd08efd7","Type":"ContainerDied","Data":"e6301e1b39c4a53eec0da2358fd5cf00a686fed2690b06e72389b91e5a219d99"} Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.276286 5055 generic.go:334] "Generic (PLEG): container finished" podID="9d4f4d49-efc9-44ef-a120-b8eda868d552" containerID="ce6c4a40a3a9d2f8beefe4abf12d5b0968a77827509a363909f19a741d4bd9d0" exitCode=0 Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.276359 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9573-account-create-4kl4d" event={"ID":"9d4f4d49-efc9-44ef-a120-b8eda868d552","Type":"ContainerDied","Data":"ce6c4a40a3a9d2f8beefe4abf12d5b0968a77827509a363909f19a741d4bd9d0"} Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.279108 5055 generic.go:334] "Generic (PLEG): container finished" podID="59dafca2-dcd1-430c-8235-33df97f6cb43" containerID="940d3d968c907a34fee8725aa2422f658c7cb351eaa5986851072ce1d5bddcde" exitCode=0 Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.279299 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4c23-account-create-xgh7m" 
event={"ID":"59dafca2-dcd1-430c-8235-33df97f6cb43","Type":"ContainerDied","Data":"940d3d968c907a34fee8725aa2422f658c7cb351eaa5986851072ce1d5bddcde"} Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.279402 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4c23-account-create-xgh7m" event={"ID":"59dafca2-dcd1-430c-8235-33df97f6cb43","Type":"ContainerStarted","Data":"027dedfdf1b8b3c3021bf8a15beffb170c84a2fbd77cf23eabb91a1b87fa5a4e"} Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.728964 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.735648 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvrg6\" (UniqueName: \"kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6\") pod \"9d4f4d49-efc9-44ef-a120-b8eda868d552\" (UID: \"9d4f4d49-efc9-44ef-a120-b8eda868d552\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.738316 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.746471 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6" (OuterVolumeSpecName: "kube-api-access-mvrg6") pod "9d4f4d49-efc9-44ef-a120-b8eda868d552" (UID: "9d4f4d49-efc9-44ef-a120-b8eda868d552"). InnerVolumeSpecName "kube-api-access-mvrg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837029 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837131 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhjzv\" (UniqueName: \"kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837157 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837179 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837223 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837238 5055 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle\") pod \"45e57763-c6df-4335-aecb-0a01fe415f15\" (UID: \"45e57763-c6df-4335-aecb-0a01fe415f15\") " Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837480 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvrg6\" (UniqueName: \"kubernetes.io/projected/9d4f4d49-efc9-44ef-a120-b8eda868d552-kube-api-access-mvrg6\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.837695 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.854916 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv" (OuterVolumeSpecName: "kube-api-access-nhjzv") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "kube-api-access-nhjzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.856535 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts" (OuterVolumeSpecName: "scripts") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.858749 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.901670 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.938707 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhjzv\" (UniqueName: \"kubernetes.io/projected/45e57763-c6df-4335-aecb-0a01fe415f15-kube-api-access-nhjzv\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.938735 5055 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45e57763-c6df-4335-aecb-0a01fe415f15-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.938744 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.938753 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.938761 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:30 crc kubenswrapper[5055]: I1011 07:15:30.941256 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data" (OuterVolumeSpecName: "config-data") pod "45e57763-c6df-4335-aecb-0a01fe415f15" (UID: "45e57763-c6df-4335-aecb-0a01fe415f15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.042509 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e57763-c6df-4335-aecb-0a01fe415f15-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.288945 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9573-account-create-4kl4d" event={"ID":"9d4f4d49-efc9-44ef-a120-b8eda868d552","Type":"ContainerDied","Data":"059178d80cd32f5392205fdb9b0b6cd72ce01691a74630e57939d7ac3f7946f6"} Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.288991 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="059178d80cd32f5392205fdb9b0b6cd72ce01691a74630e57939d7ac3f7946f6" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.289004 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-9573-account-create-4kl4d" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.292999 5055 generic.go:334] "Generic (PLEG): container finished" podID="45e57763-c6df-4335-aecb-0a01fe415f15" containerID="edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4" exitCode=0 Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293060 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293098 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerDied","Data":"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4"} Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293145 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"45e57763-c6df-4335-aecb-0a01fe415f15","Type":"ContainerDied","Data":"078437a74396bef91c516e8a346ae6375486c138fc7191914271e30778a11980"} Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293184 5055 scope.go:117] "RemoveContainer" containerID="c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293364 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="sg-core" containerID="cri-o://06a91c4df0c2b72701b2b929fef8ee3be13cbf20f6853cd958d8839cb3bff27b" gracePeriod=30 Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293488 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-notification-agent" containerID="cri-o://40ecd3a7eac7c8c3f611bd5c2e7a1817a0e729c41438d03d2fb8ea9153aa6c9e" gracePeriod=30 Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293353 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-central-agent" containerID="cri-o://7a317cbf9d96ff9a6155cc159a6fbf06531019a0858199108fc7c8dd6849cffd" gracePeriod=30 Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.293678 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="proxy-httpd" containerID="cri-o://a11178f84b0ff5dc59df70eaf19881623d0805ce98d52ecaee0409582cc73140" gracePeriod=30 Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.307621 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-56f4bfc5b4-slbvq" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.337033 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.337713 5055 scope.go:117] "RemoveContainer" containerID="edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.341989 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.371712 5055 scope.go:117] "RemoveContainer" containerID="c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805" Oct 11 07:15:31 crc kubenswrapper[5055]: E1011 07:15:31.373316 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805\": container with ID starting with c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805 not found: ID does not exist" containerID="c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805" Oct 
11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.373343 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805"} err="failed to get container status \"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805\": rpc error: code = NotFound desc = could not find container \"c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805\": container with ID starting with c2598293551fd22159528d1558adb9a42faaf9dea96b9844135163f3b8d38805 not found: ID does not exist" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.373370 5055 scope.go:117] "RemoveContainer" containerID="edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4" Oct 11 07:15:31 crc kubenswrapper[5055]: E1011 07:15:31.373673 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4\": container with ID starting with edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4 not found: ID does not exist" containerID="edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.373693 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4"} err="failed to get container status \"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4\": rpc error: code = NotFound desc = could not find container \"edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4\": container with ID starting with edfa2dc9bfac46e6c01853775728c6388d0edf4c0aabf22de785bc49810e87d4 not found: ID does not exist" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.394502 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:31 crc kubenswrapper[5055]: E1011 07:15:31.396079 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="cinder-scheduler" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396101 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="cinder-scheduler" Oct 11 07:15:31 crc kubenswrapper[5055]: E1011 07:15:31.396128 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d4f4d49-efc9-44ef-a120-b8eda868d552" containerName="mariadb-account-create" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396134 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d4f4d49-efc9-44ef-a120-b8eda868d552" containerName="mariadb-account-create" Oct 11 07:15:31 crc kubenswrapper[5055]: E1011 07:15:31.396158 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="probe" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396163 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="probe" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396342 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" containerName="probe" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396365 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" 
containerName="cinder-scheduler" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.396375 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d4f4d49-efc9-44ef-a120-b8eda868d552" containerName="mariadb-account-create" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.397642 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.406182 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.407579 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555136 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsw6r\" (UniqueName: \"kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555446 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555597 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555678 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555718 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.555740 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.656874 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.656925 5055 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.656967 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.656999 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsw6r\" (UniqueName: \"kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.657021 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.657113 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.657192 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.663416 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.665163 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.667433 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.667926 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc 
kubenswrapper[5055]: I1011 07:15:31.676474 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsw6r\" (UniqueName: \"kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r\") pod \"cinder-scheduler-0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.773291 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.917715 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-372a-account-create-mzxwb" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.928150 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4c23-account-create-xgh7m" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.963925 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hsh7\" (UniqueName: \"kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7\") pod \"59dafca2-dcd1-430c-8235-33df97f6cb43\" (UID: \"59dafca2-dcd1-430c-8235-33df97f6cb43\") " Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.964230 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2bg7\" (UniqueName: \"kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7\") pod \"d0129105-ee1c-480f-a857-48e5cd08efd7\" (UID: \"d0129105-ee1c-480f-a857-48e5cd08efd7\") " Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.970405 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7" (OuterVolumeSpecName: "kube-api-access-x2bg7") pod "d0129105-ee1c-480f-a857-48e5cd08efd7" (UID: "d0129105-ee1c-480f-a857-48e5cd08efd7"). InnerVolumeSpecName "kube-api-access-x2bg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:31 crc kubenswrapper[5055]: I1011 07:15:31.971057 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7" (OuterVolumeSpecName: "kube-api-access-2hsh7") pod "59dafca2-dcd1-430c-8235-33df97f6cb43" (UID: "59dafca2-dcd1-430c-8235-33df97f6cb43"). InnerVolumeSpecName "kube-api-access-2hsh7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.066460 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hsh7\" (UniqueName: \"kubernetes.io/projected/59dafca2-dcd1-430c-8235-33df97f6cb43-kube-api-access-2hsh7\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.066498 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2bg7\" (UniqueName: \"kubernetes.io/projected/d0129105-ee1c-480f-a857-48e5cd08efd7-kube-api-access-x2bg7\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.284741 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338467 5055 generic.go:334] "Generic (PLEG): container finished" podID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerID="a11178f84b0ff5dc59df70eaf19881623d0805ce98d52ecaee0409582cc73140" exitCode=0 Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338502 5055 generic.go:334] "Generic (PLEG): container finished" podID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerID="06a91c4df0c2b72701b2b929fef8ee3be13cbf20f6853cd958d8839cb3bff27b" exitCode=2 Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338511 5055 generic.go:334] "Generic (PLEG): container finished" podID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerID="40ecd3a7eac7c8c3f611bd5c2e7a1817a0e729c41438d03d2fb8ea9153aa6c9e" exitCode=0 Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338524 5055 generic.go:334] "Generic (PLEG): container finished" podID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerID="7a317cbf9d96ff9a6155cc159a6fbf06531019a0858199108fc7c8dd6849cffd" exitCode=0 Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338578 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerDied","Data":"a11178f84b0ff5dc59df70eaf19881623d0805ce98d52ecaee0409582cc73140"} Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338606 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerDied","Data":"06a91c4df0c2b72701b2b929fef8ee3be13cbf20f6853cd958d8839cb3bff27b"} Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338621 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerDied","Data":"40ecd3a7eac7c8c3f611bd5c2e7a1817a0e729c41438d03d2fb8ea9153aa6c9e"} Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.338632 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerDied","Data":"7a317cbf9d96ff9a6155cc159a6fbf06531019a0858199108fc7c8dd6849cffd"} Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.341394 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4c23-account-create-xgh7m" event={"ID":"59dafca2-dcd1-430c-8235-33df97f6cb43","Type":"ContainerDied","Data":"027dedfdf1b8b3c3021bf8a15beffb170c84a2fbd77cf23eabb91a1b87fa5a4e"} Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.341500 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="027dedfdf1b8b3c3021bf8a15beffb170c84a2fbd77cf23eabb91a1b87fa5a4e" Oct 11 07:15:32 crc 
kubenswrapper[5055]: I1011 07:15:32.341597 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4c23-account-create-xgh7m"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.344140 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-372a-account-create-mzxwb" event={"ID":"d0129105-ee1c-480f-a857-48e5cd08efd7","Type":"ContainerDied","Data":"d7b1133762fcada2b0e9664c594305b101a2cc5a66bcaa191b8f80d7178695a4"}
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.344183 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7b1133762fcada2b0e9664c594305b101a2cc5a66bcaa191b8f80d7178695a4"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.344250 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-372a-account-create-mzxwb"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.395877 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.421720 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.421798 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.421853 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.422601 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.422667 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3" gracePeriod=600
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573466 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fm9xq\" (UniqueName: \"kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573536 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573581 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573608 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573744 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573802 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.573845 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts\") pod \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\" (UID: \"ffb5fd0d-479c-4110-9933-bb4fc74aca42\") "
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.574237 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.575035 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.578882 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts" (OuterVolumeSpecName: "scripts") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.579453 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq" (OuterVolumeSpecName: "kube-api-access-fm9xq") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "kube-api-access-fm9xq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.608176 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.678789 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.678847 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.678865 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fm9xq\" (UniqueName: \"kubernetes.io/projected/ffb5fd0d-479c-4110-9933-bb4fc74aca42-kube-api-access-fm9xq\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.678879 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.678890 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffb5fd0d-479c-4110-9933-bb4fc74aca42-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.691002 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.715912 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data" (OuterVolumeSpecName: "config-data") pod "ffb5fd0d-479c-4110-9933-bb4fc74aca42" (UID: "ffb5fd0d-479c-4110-9933-bb4fc74aca42"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.781449 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:32 crc kubenswrapper[5055]: I1011 07:15:32.781649 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffb5fd0d-479c-4110-9933-bb4fc74aca42-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.006709 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45e57763-c6df-4335-aecb-0a01fe415f15" path="/var/lib/kubelet/pods/45e57763-c6df-4335-aecb-0a01fe415f15/volumes"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.365741 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffb5fd0d-479c-4110-9933-bb4fc74aca42","Type":"ContainerDied","Data":"748f68b7167fd228ae1f8a3d1dbf65e50372ad53c78cfdfac197f63d954580d1"}
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.366030 5055 scope.go:117] "RemoveContainer" containerID="a11178f84b0ff5dc59df70eaf19881623d0805ce98d52ecaee0409582cc73140"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.366263 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.376785 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3" exitCode=0
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.377926 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3"}
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.377973 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117"}
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.385157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerStarted","Data":"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc"}
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.385210 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerStarted","Data":"209dcfde6ff95c571105b7b1888d52894edf8e04d2e926dc08c6cf9f71baa1ca"}
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.432209 5055 scope.go:117] "RemoveContainer" containerID="06a91c4df0c2b72701b2b929fef8ee3be13cbf20f6853cd958d8839cb3bff27b"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.448637 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.481010 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.499177 5055 scope.go:117] "RemoveContainer" containerID="40ecd3a7eac7c8c3f611bd5c2e7a1817a0e729c41438d03d2fb8ea9153aa6c9e"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.527206 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.528914 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.553586 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.553975 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-notification-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.553987 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-notification-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.553996 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0129105-ee1c-480f-a857-48e5cd08efd7" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554002 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0129105-ee1c-480f-a857-48e5cd08efd7" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.554014 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-central-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554020 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-central-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.554036 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59dafca2-dcd1-430c-8235-33df97f6cb43" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554041 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="59dafca2-dcd1-430c-8235-33df97f6cb43" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.554056 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="proxy-httpd"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554061 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="proxy-httpd"
Oct 11 07:15:33 crc kubenswrapper[5055]: E1011 07:15:33.554074 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="sg-core"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554079 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="sg-core"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554277 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0129105-ee1c-480f-a857-48e5cd08efd7" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554291 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-notification-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554302 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="ceilometer-central-agent"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554313 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="proxy-httpd"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554324 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="59dafca2-dcd1-430c-8235-33df97f6cb43" containerName="mariadb-account-create"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.554341 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" containerName="sg-core"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.562089 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.568188 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.568267 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.590012 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.593173 5055 scope.go:117] "RemoveContainer" containerID="7a317cbf9d96ff9a6155cc159a6fbf06531019a0858199108fc7c8dd6849cffd"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.619219 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-94mmk"]
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.620707 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.627411 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.627841 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-5kjrx"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.628131 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.635130 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.636215 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwk5r\" (UniqueName: \"kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.636868 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.636922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.636949 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.637012 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.637070 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.637118 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.646960 5055 scope.go:117] "RemoveContainer" containerID="535dac1f0973003af975be73a91e8fd4d0110e656b0a5fa5041beaefb3cb8f58"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.654591 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.659067 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-94mmk"]
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742114 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742186 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742219 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742305 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwk5r\" (UniqueName: \"kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742350 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742383 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.742402 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.744307 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.744337 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.750079 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.750897 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.755086 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.762267 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwk5r\" (UniqueName: \"kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.783437 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts\") pod \"ceilometer-0\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") " pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.844077 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.844405 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnvs6\" (UniqueName: \"kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.844439 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.844484 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.906004 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.945750 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.946026 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.946254 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.946361 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnvs6\" (UniqueName: \"kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.952438 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.953053 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.953087 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:33 crc kubenswrapper[5055]: I1011 07:15:33.963535 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnvs6\" (UniqueName: \"kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6\") pod \"nova-cell0-conductor-db-sync-94mmk\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.001091 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-94mmk"
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.402263 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerStarted","Data":"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad"}
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.402650 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.402669 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.420646 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.420629815 podStartE2EDuration="3.420629815s" podCreationTimestamp="2025-10-11 07:15:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:15:34.419135423 +0000 UTC m=+1318.193409230" watchObservedRunningTime="2025-10-11 07:15:34.420629815 +0000 UTC m=+1318.194903622"
Oct 11 07:15:34 crc kubenswrapper[5055]: W1011 07:15:34.448757 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod936f513e_ae54_458c_8dfc_52eb27c8d5f6.slice/crio-a0eaacb7b2579b91d11d27de27597d4849eec3805cf12b466e296534e3afdea9 WatchSource:0}: Error finding container a0eaacb7b2579b91d11d27de27597d4849eec3805cf12b466e296534e3afdea9: Status 404 returned error can't find the container with id a0eaacb7b2579b91d11d27de27597d4849eec3805cf12b466e296534e3afdea9
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.449632 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:34 crc kubenswrapper[5055]: I1011 07:15:34.564203 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-94mmk"]
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.018941 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffb5fd0d-479c-4110-9933-bb4fc74aca42" path="/var/lib/kubelet/pods/ffb5fd0d-479c-4110-9933-bb4fc74aca42/volumes"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.055454 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-58d4f6c497-jm75b"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.141691 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"]
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.142005 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-56f4bfc5b4-slbvq" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-api" containerID="cri-o://bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007" gracePeriod=30
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.142146 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-56f4bfc5b4-slbvq" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-httpd" containerID="cri-o://496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253" gracePeriod=30
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.463283 5055 generic.go:334] "Generic (PLEG): container finished" podID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerID="496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253" exitCode=0
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.463537 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerDied","Data":"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"}
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.465467 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-94mmk" event={"ID":"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d","Type":"ContainerStarted","Data":"c01fd74d8ce822e76a0fdeec132e24db41087f2d2aff9b7f3a6b0007a7fd7b4b"}
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.480169 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerStarted","Data":"db57e67379770490a8db7f2d2642fb089364b9b70775292e10e1232599efb02a"}
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.480236 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerStarted","Data":"a0eaacb7b2579b91d11d27de27597d4849eec3805cf12b466e296534e3afdea9"}
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.547965 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.548315 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.611147 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.611506 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:35 crc kubenswrapper[5055]: I1011 07:15:35.919327 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Oct 11 07:15:36 crc kubenswrapper[5055]: I1011 07:15:36.503892 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerStarted","Data":"dc5e3e9c31f7e3807ca0c036e7cf63001e5268b174b4ee2dc45fad0916db8ead"}
Oct 11 07:15:36 crc kubenswrapper[5055]: I1011 07:15:36.504372 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:36 crc kubenswrapper[5055]: I1011 07:15:36.504571 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:36 crc kubenswrapper[5055]: I1011 07:15:36.775268 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Oct 11 07:15:37 crc kubenswrapper[5055]: I1011 07:15:37.010320 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:37 crc kubenswrapper[5055]: I1011 07:15:37.010401 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 11 07:15:37 crc kubenswrapper[5055]: I1011 07:15:37.013313 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Oct 11 07:15:37 crc kubenswrapper[5055]: I1011 07:15:37.520546 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerStarted","Data":"c20dfd509e9657fbfbe4d947ae153763a2644f603e9f7401abe2e505c4395247"}
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.462903 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-56f4bfc5b4-slbvq"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.532387 5055 generic.go:334] "Generic (PLEG): container finished" podID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerID="bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007" exitCode=0
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.532433 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-56f4bfc5b4-slbvq"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.532435 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerDied","Data":"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"}
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.532493 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-56f4bfc5b4-slbvq" event={"ID":"c880aae1-2230-47fb-94b5-5b7337a19cbe","Type":"ContainerDied","Data":"1ec92de3bd7a581223542a45214a9c5509718b31db79fc2f9f121d24797f6f9e"}
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.532513 5055 scope.go:117] "RemoveContainer" containerID="496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.535433 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerStarted","Data":"95afa68b9b211e55c93dab21cfe9c3ec5153cb8e9db8789b19169d63125d5fee"}
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.535552 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.553287 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs\") pod \"c880aae1-2230-47fb-94b5-5b7337a19cbe\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") "
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.553356 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle\") pod \"c880aae1-2230-47fb-94b5-5b7337a19cbe\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") "
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.553389 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config\") pod \"c880aae1-2230-47fb-94b5-5b7337a19cbe\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") "
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.553464 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config\") pod \"c880aae1-2230-47fb-94b5-5b7337a19cbe\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") "
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.553651 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6f9w\" (UniqueName: \"kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w\") pod \"c880aae1-2230-47fb-94b5-5b7337a19cbe\" (UID: \"c880aae1-2230-47fb-94b5-5b7337a19cbe\") "
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.566881 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "c880aae1-2230-47fb-94b5-5b7337a19cbe" (UID: "c880aae1-2230-47fb-94b5-5b7337a19cbe"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.570894 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.598826753 podStartE2EDuration="5.570873266s" podCreationTimestamp="2025-10-11 07:15:33 +0000 UTC" firstStartedPulling="2025-10-11 07:15:34.450435908 +0000 UTC m=+1318.224709705" lastFinishedPulling="2025-10-11 07:15:37.422482411 +0000 UTC m=+1321.196756218" observedRunningTime="2025-10-11 07:15:38.561079249 +0000 UTC m=+1322.335353056" watchObservedRunningTime="2025-10-11 07:15:38.570873266 +0000 UTC m=+1322.345147083"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.579181 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w" (OuterVolumeSpecName: "kube-api-access-l6f9w") pod "c880aae1-2230-47fb-94b5-5b7337a19cbe" (UID: "c880aae1-2230-47fb-94b5-5b7337a19cbe"). InnerVolumeSpecName "kube-api-access-l6f9w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.598612 5055 scope.go:117] "RemoveContainer" containerID="bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.619468 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c880aae1-2230-47fb-94b5-5b7337a19cbe" (UID: "c880aae1-2230-47fb-94b5-5b7337a19cbe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.652923 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config" (OuterVolumeSpecName: "config") pod "c880aae1-2230-47fb-94b5-5b7337a19cbe" (UID: "c880aae1-2230-47fb-94b5-5b7337a19cbe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.655835 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "c880aae1-2230-47fb-94b5-5b7337a19cbe" (UID: "c880aae1-2230-47fb-94b5-5b7337a19cbe"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.662072 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6f9w\" (UniqueName: \"kubernetes.io/projected/c880aae1-2230-47fb-94b5-5b7337a19cbe-kube-api-access-l6f9w\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.662097 5055 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.662106 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.662117 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-config\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.662127 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c880aae1-2230-47fb-94b5-5b7337a19cbe-httpd-config\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.677716 5055 scope.go:117] "RemoveContainer" containerID="496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"
Oct 11 07:15:38 crc kubenswrapper[5055]: E1011 07:15:38.678071 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253\": container with ID starting with 496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253 not found: ID does not exist" containerID="496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.679094 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253"} err="failed to get container status \"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253\": rpc error: code = NotFound desc = could not find container \"496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253\": container with ID starting with 496c1b180ccb5531e4ccf2e5df4ffcf02ff7be5ad22305816076c93aef0bf253 not found: ID does not exist"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.679144 5055 scope.go:117] "RemoveContainer" containerID="bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"
Oct 11 07:15:38 crc kubenswrapper[5055]: E1011 07:15:38.680968 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007\": container with ID starting with bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007 not found: ID does not exist" containerID="bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.681003 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007"} err="failed to get container status \"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007\": rpc error: code = NotFound desc = could not find container \"bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007\": container with ID starting with bcefa1337db59c62d82d9b6051c7a9a7fb0c7d557213ca7b73cc40717a422007 not found: ID does not exist"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.854075 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.854172 5055 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.871391 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"]
Oct 11 07:15:38 crc kubenswrapper[5055]: I1011 07:15:38.881388 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-56f4bfc5b4-slbvq"]
Oct 11 07:15:39 crc kubenswrapper[5055]: I1011 07:15:39.011745 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" path="/var/lib/kubelet/pods/c880aae1-2230-47fb-94b5-5b7337a19cbe/volumes"
Oct 11 07:15:39 crc kubenswrapper[5055]: I1011 07:15:39.217118 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Oct 11 07:15:41 crc kubenswrapper[5055]: I1011 07:15:41.986276 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Oct 11 07:15:44 crc kubenswrapper[5055]: I1011 07:15:44.594616 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-94mmk" event={"ID":"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d","Type":"ContainerStarted","Data":"84af3482a1b5087b3f9fce032a75fe9ea67e0df26b99966077869dc24981e2fa"}
Oct 11 07:15:44 crc kubenswrapper[5055]: I1011 07:15:44.614879 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-94mmk" podStartSLOduration=1.998563788 podStartE2EDuration="11.614857688s" podCreationTimestamp="2025-10-11 07:15:33 +0000 UTC" firstStartedPulling="2025-10-11 07:15:34.566876031 +0000 UTC m=+1318.341149838" lastFinishedPulling="2025-10-11 07:15:44.183169931 +0000 UTC m=+1327.957443738" observedRunningTime="2025-10-11 07:15:44.607928502 +0000 UTC m=+1328.382202329" watchObservedRunningTime="2025-10-11 07:15:44.614857688 +0000 UTC m=+1328.389131495"
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.005245 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.005569 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-central-agent" containerID="cri-o://db57e67379770490a8db7f2d2642fb089364b9b70775292e10e1232599efb02a" gracePeriod=30
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.005639 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="sg-core" containerID="cri-o://c20dfd509e9657fbfbe4d947ae153763a2644f603e9f7401abe2e505c4395247" gracePeriod=30
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.005666 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-notification-agent" containerID="cri-o://dc5e3e9c31f7e3807ca0c036e7cf63001e5268b174b4ee2dc45fad0916db8ead" gracePeriod=30
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.005651 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="proxy-httpd" containerID="cri-o://95afa68b9b211e55c93dab21cfe9c3ec5153cb8e9db8789b19169d63125d5fee" gracePeriod=30
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.605879 5055 generic.go:334] "Generic (PLEG): container finished" podID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerID="95afa68b9b211e55c93dab21cfe9c3ec5153cb8e9db8789b19169d63125d5fee" exitCode=0
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.606410 5055 generic.go:334] "Generic (PLEG): container finished" podID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerID="c20dfd509e9657fbfbe4d947ae153763a2644f603e9f7401abe2e505c4395247" exitCode=2
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.606419 5055 generic.go:334] "Generic (PLEG): container finished" podID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerID="db57e67379770490a8db7f2d2642fb089364b9b70775292e10e1232599efb02a" exitCode=0
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.606161 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerDied","Data":"95afa68b9b211e55c93dab21cfe9c3ec5153cb8e9db8789b19169d63125d5fee"}
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.606470 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerDied","Data":"c20dfd509e9657fbfbe4d947ae153763a2644f603e9f7401abe2e505c4395247"}
Oct 11 07:15:45 crc kubenswrapper[5055]: I1011 07:15:45.606487 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerDied","Data":"db57e67379770490a8db7f2d2642fb089364b9b70775292e10e1232599efb02a"}
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.619204 5055 generic.go:334] "Generic (PLEG): container finished" podID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerID="dc5e3e9c31f7e3807ca0c036e7cf63001e5268b174b4ee2dc45fad0916db8ead" exitCode=0
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.619314 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerDied","Data":"dc5e3e9c31f7e3807ca0c036e7cf63001e5268b174b4ee2dc45fad0916db8ead"}
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.706913 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808422 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808522 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808640 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808674 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808702 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808790 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.808811 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwk5r\" (UniqueName: \"kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r\") pod \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\" (UID: \"936f513e-ae54-458c-8dfc-52eb27c8d5f6\") "
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.809391 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.809560 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.810050 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.810080 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/936f513e-ae54-458c-8dfc-52eb27c8d5f6-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.815947 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r" (OuterVolumeSpecName: "kube-api-access-fwk5r") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "kube-api-access-fwk5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.817714 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts" (OuterVolumeSpecName: "scripts") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.851845 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.885051 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.901653 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data" (OuterVolumeSpecName: "config-data") pod "936f513e-ae54-458c-8dfc-52eb27c8d5f6" (UID: "936f513e-ae54-458c-8dfc-52eb27c8d5f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.912075 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.912114 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.912129 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwk5r\" (UniqueName: \"kubernetes.io/projected/936f513e-ae54-458c-8dfc-52eb27c8d5f6-kube-api-access-fwk5r\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.912140 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:46 crc kubenswrapper[5055]: I1011 07:15:46.912150 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/936f513e-ae54-458c-8dfc-52eb27c8d5f6-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.631922 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"936f513e-ae54-458c-8dfc-52eb27c8d5f6","Type":"ContainerDied","Data":"a0eaacb7b2579b91d11d27de27597d4849eec3805cf12b466e296534e3afdea9"}
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.632257 5055 scope.go:117] "RemoveContainer" containerID="95afa68b9b211e55c93dab21cfe9c3ec5153cb8e9db8789b19169d63125d5fee"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.632006 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.666598 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.673423 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.692754 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693562 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-central-agent"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693585 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-central-agent"
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693617 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-notification-agent"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693625 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-notification-agent"
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693635 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="proxy-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693643 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="proxy-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693656 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693662 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693673 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-api"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693680 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-api"
Oct 11 07:15:47 crc kubenswrapper[5055]: E1011 07:15:47.693693 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="sg-core"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693699 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="sg-core"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693882 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="proxy-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693897 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-httpd"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693912 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c880aae1-2230-47fb-94b5-5b7337a19cbe" containerName="neutron-api"
Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693924 5055 
memory_manager.go:354] "RemoveStaleState removing state" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="sg-core" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693934 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-central-agent" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.693953 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" containerName="ceilometer-notification-agent" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.695441 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.698412 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.699839 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.706498 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.827432 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.827817 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.827955 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vc8z\" (UniqueName: \"kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.828128 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.828227 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.828443 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.828525 5055 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.902607 5055 scope.go:117] "RemoveContainer" containerID="c20dfd509e9657fbfbe4d947ae153763a2644f603e9f7401abe2e505c4395247" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.921294 5055 scope.go:117] "RemoveContainer" containerID="dc5e3e9c31f7e3807ca0c036e7cf63001e5268b174b4ee2dc45fad0916db8ead" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.929988 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930029 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930130 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930165 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930187 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930203 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930225 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vc8z\" (UniqueName: \"kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.930971 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.931112 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.935504 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.936539 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.937166 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.939725 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.950654 5055 scope.go:117] "RemoveContainer" containerID="db57e67379770490a8db7f2d2642fb089364b9b70775292e10e1232599efb02a" Oct 11 07:15:47 crc kubenswrapper[5055]: I1011 07:15:47.957851 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vc8z\" (UniqueName: \"kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z\") pod \"ceilometer-0\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " pod="openstack/ceilometer-0" Oct 11 07:15:48 crc kubenswrapper[5055]: I1011 07:15:48.017904 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:15:48 crc kubenswrapper[5055]: I1011 07:15:48.482099 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:15:48 crc kubenswrapper[5055]: W1011 07:15:48.483845 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d3cc469_1b87_4aa6_ae06_a9101f3e1856.slice/crio-7792c44896a50c744e498f501f06983fe39ee71bf7d4ec7f9d2cf18b7ef4e55d WatchSource:0}: Error finding container 7792c44896a50c744e498f501f06983fe39ee71bf7d4ec7f9d2cf18b7ef4e55d: Status 404 returned error can't find the container with id 7792c44896a50c744e498f501f06983fe39ee71bf7d4ec7f9d2cf18b7ef4e55d Oct 11 07:15:48 crc kubenswrapper[5055]: I1011 07:15:48.640823 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerStarted","Data":"7792c44896a50c744e498f501f06983fe39ee71bf7d4ec7f9d2cf18b7ef4e55d"} Oct 11 07:15:49 crc kubenswrapper[5055]: I1011 07:15:49.004028 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="936f513e-ae54-458c-8dfc-52eb27c8d5f6" path="/var/lib/kubelet/pods/936f513e-ae54-458c-8dfc-52eb27c8d5f6/volumes" Oct 11 07:15:49 crc kubenswrapper[5055]: I1011 07:15:49.651469 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerStarted","Data":"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f"} Oct 11 07:15:50 crc kubenswrapper[5055]: I1011 07:15:50.662526 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerStarted","Data":"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1"} Oct 11 07:15:51 crc kubenswrapper[5055]: I1011 07:15:51.672480 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerStarted","Data":"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70"} Oct 11 07:15:52 crc kubenswrapper[5055]: I1011 07:15:52.683955 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerStarted","Data":"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b"} Oct 11 07:15:52 crc kubenswrapper[5055]: I1011 07:15:52.684263 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 07:15:52 crc kubenswrapper[5055]: I1011 07:15:52.712039 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.081796223 podStartE2EDuration="5.712017809s" podCreationTimestamp="2025-10-11 07:15:47 +0000 UTC" firstStartedPulling="2025-10-11 07:15:48.48649871 +0000 UTC m=+1332.260772517" lastFinishedPulling="2025-10-11 07:15:52.116720296 +0000 UTC m=+1335.890994103" observedRunningTime="2025-10-11 07:15:52.70673383 +0000 UTC m=+1336.481007667" watchObservedRunningTime="2025-10-11 07:15:52.712017809 +0000 UTC m=+1336.486291636" Oct 11 07:15:56 crc kubenswrapper[5055]: I1011 07:15:56.719112 5055 generic.go:334] "Generic (PLEG): container finished" podID="eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" containerID="84af3482a1b5087b3f9fce032a75fe9ea67e0df26b99966077869dc24981e2fa" exitCode=0 Oct 11 07:15:56 crc kubenswrapper[5055]: 
I1011 07:15:56.719188 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-94mmk" event={"ID":"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d","Type":"ContainerDied","Data":"84af3482a1b5087b3f9fce032a75fe9ea67e0df26b99966077869dc24981e2fa"} Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.052953 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-94mmk" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.209803 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts\") pod \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.209896 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle\") pod \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.209934 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnvs6\" (UniqueName: \"kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6\") pod \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.209981 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") pod \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.216142 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts" (OuterVolumeSpecName: "scripts") pod "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" (UID: "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.218754 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6" (OuterVolumeSpecName: "kube-api-access-wnvs6") pod "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" (UID: "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d"). InnerVolumeSpecName "kube-api-access-wnvs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:15:58 crc kubenswrapper[5055]: E1011 07:15:58.234924 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data podName:eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d nodeName:}" failed. No retries permitted until 2025-10-11 07:15:58.734891175 +0000 UTC m=+1342.509164982 (durationBeforeRetry 500ms). 
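[editor's note] The pod_startup_latency_tracker entry for ceilometer-0 above reports podStartE2EDuration=5.712017809s but podStartSLOduration=2.081796223s: the SLO figure excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). A self-contained check using the timestamps copied from that entry:

// Reproduces the ceilometer-0 startup figures logged above:
// SLO duration = end-to-end duration - image-pull window.
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-10-11 07:15:47 +0000 UTC")           // podCreationTimestamp
	firstPull := parse("2025-10-11 07:15:48.48649871 +0000 UTC") // firstStartedPulling
	lastPull := parse("2025-10-11 07:15:52.116720296 +0000 UTC") // lastFinishedPulling
	running := parse("2025-10-11 07:15:52.712017809 +0000 UTC")  // watchObservedRunningTime

	e2e := running.Sub(created)          // 5.712017809s
	slo := e2e - lastPull.Sub(firstPull) // 2.081796223s
	fmt.Println(e2e, slo)
}
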
Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data") pod "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" (UID: "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d") : error deleting /var/lib/kubelet/pods/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d/volume-subpaths: remove /var/lib/kubelet/pods/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d/volume-subpaths: no such file or directory Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.238009 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" (UID: "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.312621 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.312655 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.312666 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnvs6\" (UniqueName: \"kubernetes.io/projected/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-kube-api-access-wnvs6\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.738704 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-94mmk" event={"ID":"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d","Type":"ContainerDied","Data":"c01fd74d8ce822e76a0fdeec132e24db41087f2d2aff9b7f3a6b0007a7fd7b4b"} Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.739098 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c01fd74d8ce822e76a0fdeec132e24db41087f2d2aff9b7f3a6b0007a7fd7b4b" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.738782 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-94mmk" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.821166 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") pod \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\" (UID: \"eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d\") " Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.825936 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data" (OuterVolumeSpecName: "config-data") pod "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" (UID: "eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d"). InnerVolumeSpecName "config-data". 
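[editor's note] The nestedpendingoperations error above ("No retries permitted until ... durationBeforeRetry 500ms") gates the failed config-data unmount behind a growing delay; the log then shows the retried unmount succeeding on the next reconcile pass after the 500ms window (07:15:58.825936). A sketch of that capped exponential-backoff pattern, with illustrative constants rather than the kubelet's exact implementation:

// Capped exponential backoff, as in the "No retries permitted until"
// entry above: each failure doubles the delay up to a maximum, and
// attempts before nextTry are rejected. Illustrative only.
package main

import (
	"fmt"
	"time"
)

type backoff struct {
	delay   time.Duration
	nextTry time.Time
}

func (b *backoff) fail(now time.Time, initial, max time.Duration) {
	if b.delay == 0 {
		b.delay = initial
	} else {
		b.delay *= 2
		if b.delay > max {
			b.delay = max
		}
	}
	b.nextTry = now.Add(b.delay)
}

func (b *backoff) allowed(now time.Time) bool { return !now.Before(b.nextTry) }

func main() {
	var b backoff
	now := time.Now()
	b.fail(now, 500*time.Millisecond, 2*time.Minute)
	fmt.Printf("no retries permitted until %s (durationBeforeRetry %s)\n",
		b.nextTry.Format("15:04:05.000"), b.delay)
	fmt.Println("retry allowed after backoff:", b.allowed(now.Add(600*time.Millisecond)))
}
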
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.836554 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:15:58 crc kubenswrapper[5055]: E1011 07:15:58.837248 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" containerName="nova-cell0-conductor-db-sync" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.837362 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" containerName="nova-cell0-conductor-db-sync" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.837656 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" containerName="nova-cell0-conductor-db-sync" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.841006 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.847044 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.922954 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.923291 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.923499 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9fwb\" (UniqueName: \"kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:58 crc kubenswrapper[5055]: I1011 07:15:58.923714 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.025961 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9fwb\" (UniqueName: \"kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.026071 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.026154 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.032181 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.032274 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.047103 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9fwb\" (UniqueName: \"kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb\") pod \"nova-cell0-conductor-0\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.194640 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.613505 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:15:59 crc kubenswrapper[5055]: I1011 07:15:59.750371 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3f14d-ff09-4533-89d6-e53afbda0dd6","Type":"ContainerStarted","Data":"e1eb547c910a476ce37a41d13f0ba306a1547be5b15340a4cf5986f589b3ba80"} Oct 11 07:16:00 crc kubenswrapper[5055]: I1011 07:16:00.775885 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3f14d-ff09-4533-89d6-e53afbda0dd6","Type":"ContainerStarted","Data":"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9"} Oct 11 07:16:00 crc kubenswrapper[5055]: I1011 07:16:00.776242 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Oct 11 07:16:00 crc kubenswrapper[5055]: I1011 07:16:00.798061 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.7980429559999997 podStartE2EDuration="2.798042956s" podCreationTimestamp="2025-10-11 07:15:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:00.793113037 +0000 UTC m=+1344.567386854" watchObservedRunningTime="2025-10-11 07:16:00.798042956 +0000 UTC m=+1344.572316763" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.222317 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.645229 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-bh6wt"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.646431 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.651699 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.651858 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.654912 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh6wt"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.750969 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.751344 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.751431 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rt6g\" (UniqueName: \"kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.751456 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.829897 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.831388 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.838115 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.839602 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.853254 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.853362 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.853442 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rt6g\" (UniqueName: \"kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.853468 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.860819 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.861624 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.866253 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.901561 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.903345 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.910847 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.915839 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.920959 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rt6g\" (UniqueName: \"kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g\") pod \"nova-cell0-cell-mapping-bh6wt\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") " pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.955862 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.956040 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxl55\" (UniqueName: \"kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.956089 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:04 crc kubenswrapper[5055]: I1011 07:16:04.978214 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh6wt" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.060981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061036 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061078 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061122 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061146 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxl55\" (UniqueName: \"kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061165 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f28b8\" (UniqueName: \"kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.061183 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.074619 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.108690 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.148546 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-nxl55\" (UniqueName: \"kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55\") pod \"nova-scheduler-0\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.149207 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.163934 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.164015 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.164139 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.164167 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f28b8\" (UniqueName: \"kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.168163 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.170874 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.269357 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.272974 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.273684 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.275908 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.276077 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.289900 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f28b8\" (UniqueName: \"kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8\") pod \"nova-metadata-0\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") " pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.296256 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.297654 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.300227 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.330888 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.332572 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.353978 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.385229 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.385475 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.387105 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.387167 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s86v\" (UniqueName: \"kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.387335 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.387366 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.388820 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxj9z\" (UniqueName: \"kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.392000 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"] Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.471003 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494256 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494296 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgk6k\" (UniqueName: \"kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494333 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494367 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494392 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s86v\" (UniqueName: \"kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494417 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494449 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494478 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494498 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0" Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494516 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494545 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxj9z\" (UniqueName: \"kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494578 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.494606 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.496724 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.502428 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.502445 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.507873 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.510392 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.536423 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s86v\" (UniqueName: \"kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v\") pod \"nova-api-0\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " pod="openstack/nova-api-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.536848 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxj9z\" (UniqueName: \"kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z\") pod \"nova-cell1-novncproxy-0\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.595687 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.595875 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.595988 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.596097 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.596195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgk6k\" (UniqueName: \"kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.596263 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.597240 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.597853 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.598480 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.599041 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.599685 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.626965 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgk6k\" (UniqueName: \"kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k\") pod \"dnsmasq-dns-6ffc974fdf-mlh8n\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.654645 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.677713 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.716155 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:05 crc kubenswrapper[5055]: I1011 07:16:05.967787 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh6wt"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.096405 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.153490 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xbzvs"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.154895 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.157539 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.157804 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.171777 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xbzvs"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.189476 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.311090 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.311465 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.312000 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.312121 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srh28\" (UniqueName: \"kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: W1011 07:16:06.369047 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f24e7cc_b698_4558_bc8f_e9ee1e3e0fcd.slice/crio-a04fecef429d59b89d249ca156fc6c70d2a64e9a6fed100268433d743454d42a WatchSource:0}: Error finding container a04fecef429d59b89d249ca156fc6c70d2a64e9a6fed100268433d743454d42a: Status 404 returned error can't find the container with id a04fecef429d59b89d249ca156fc6c70d2a64e9a6fed100268433d743454d42a
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.370615 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.391309 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.413852 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.413909 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.414019 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.414139 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srh28\" (UniqueName: \"kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.422749 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.427119 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.430198 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.438928 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srh28\" (UniqueName: \"kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28\") pod \"nova-cell1-conductor-db-sync-xbzvs\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") " pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.508898 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.547423 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"]
Oct 11 07:16:06 crc kubenswrapper[5055]: W1011 07:16:06.561980 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44d34b4e_3d9f_4d8c_9e0b_818f54207174.slice/crio-06aa87f2feec55dc8d8526178c948490e64b8d67700318048c257792057e7fdb WatchSource:0}: Error finding container 06aa87f2feec55dc8d8526178c948490e64b8d67700318048c257792057e7fdb: Status 404 returned error can't find the container with id 06aa87f2feec55dc8d8526178c948490e64b8d67700318048c257792057e7fdb
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.899037 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh6wt" event={"ID":"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd","Type":"ContainerStarted","Data":"59f1f8256c40f1b0fc187bbfdda406a2648bc79c5276cfc25ef7d0febc6bb0f4"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.899344 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh6wt" event={"ID":"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd","Type":"ContainerStarted","Data":"1a9b0620b5cae745d1cc8611c8c773d2d1556fde915bb728c45d20eee34bcb3f"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.910979 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e5b81d5d-20b0-421e-a67e-abedea3c766f","Type":"ContainerStarted","Data":"9ed4838e161d0ebc4b5b72abb922faa095f682871fe46953d4430f2795716cbc"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.919454 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-bh6wt" podStartSLOduration=2.919437656 podStartE2EDuration="2.919437656s" podCreationTimestamp="2025-10-11 07:16:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:06.914012293 +0000 UTC m=+1350.688286100" watchObservedRunningTime="2025-10-11 07:16:06.919437656 +0000 UTC m=+1350.693711463"
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.923619 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerStarted","Data":"d23ceffcec6e16461dcf554e74509f4c6466db1481ae8413c9ba48b272972193"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.935836 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerStarted","Data":"a04fecef429d59b89d249ca156fc6c70d2a64e9a6fed100268433d743454d42a"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.939479 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" event={"ID":"44d34b4e-3d9f-4d8c-9e0b-818f54207174","Type":"ContainerStarted","Data":"06aa87f2feec55dc8d8526178c948490e64b8d67700318048c257792057e7fdb"}
Oct 11 07:16:06 crc kubenswrapper[5055]: I1011 07:16:06.944419 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c330ab35-8c68-44b0-91a5-e7d58920a253","Type":"ContainerStarted","Data":"a7a566c9f12536d073937b9c5bcc571acadca5f77fb46cce827dd0ebc3e6b8db"}
Oct 11 07:16:07 crc kubenswrapper[5055]: I1011 07:16:07.033264 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xbzvs"]
Oct 11 07:16:07 crc kubenswrapper[5055]: I1011 07:16:07.959174 5055 generic.go:334] "Generic (PLEG): container finished" podID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerID="6aaa9ba325ed091ed92e606e40a5233df374e585e7a750933844ee44f6e77983" exitCode=0
Oct 11 07:16:07 crc kubenswrapper[5055]: I1011 07:16:07.959233 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" event={"ID":"44d34b4e-3d9f-4d8c-9e0b-818f54207174","Type":"ContainerDied","Data":"6aaa9ba325ed091ed92e606e40a5233df374e585e7a750933844ee44f6e77983"}
Oct 11 07:16:07 crc kubenswrapper[5055]: I1011 07:16:07.962666 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xbzvs" event={"ID":"75e6c9e3-4bce-4031-94a5-71396f262b51","Type":"ContainerStarted","Data":"e924a4a0bf51b244d095f694add529a8eefd25c7b9d6ffbe0297fb57e1f80c20"}
Oct 11 07:16:07 crc kubenswrapper[5055]: I1011 07:16:07.962703 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xbzvs" event={"ID":"75e6c9e3-4bce-4031-94a5-71396f262b51","Type":"ContainerStarted","Data":"0f9e9d4e54c63656ab60d3522d1c1c4be70d52337889ecf63f7264097beea113"}
Oct 11 07:16:08 crc kubenswrapper[5055]: I1011 07:16:08.004736 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-xbzvs" podStartSLOduration=2.004718046 podStartE2EDuration="2.004718046s" podCreationTimestamp="2025-10-11 07:16:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:08.003486641 +0000 UTC m=+1351.777760448" watchObservedRunningTime="2025-10-11 07:16:08.004718046 +0000 UTC m=+1351.778991853"
Oct 11 07:16:08 crc kubenswrapper[5055]: I1011 07:16:08.470477 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:08 crc kubenswrapper[5055]: I1011 07:16:08.480130 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.984825 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c330ab35-8c68-44b0-91a5-e7d58920a253","Type":"ContainerStarted","Data":"26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759"}
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.986741 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e5b81d5d-20b0-421e-a67e-abedea3c766f","Type":"ContainerStarted","Data":"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"}
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.987060 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e5b81d5d-20b0-421e-a67e-abedea3c766f" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa" gracePeriod=30
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.988691 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerStarted","Data":"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"}
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.992587 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerStarted","Data":"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959"}
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.996597 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" event={"ID":"44d34b4e-3d9f-4d8c-9e0b-818f54207174","Type":"ContainerStarted","Data":"e0f8f4f5ee8e27ab9d848e71a6fa0ca6a9e1475c91201c56ce1efa6697fb6e46"}
Oct 11 07:16:09 crc kubenswrapper[5055]: I1011 07:16:09.996774 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:10 crc kubenswrapper[5055]: I1011 07:16:10.010133 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.649524584 podStartE2EDuration="6.010116794s" podCreationTimestamp="2025-10-11 07:16:04 +0000 UTC" firstStartedPulling="2025-10-11 07:16:06.094303613 +0000 UTC m=+1349.868577420" lastFinishedPulling="2025-10-11 07:16:09.454895823 +0000 UTC m=+1353.229169630" observedRunningTime="2025-10-11 07:16:10.008855038 +0000 UTC m=+1353.783128845" watchObservedRunningTime="2025-10-11 07:16:10.010116794 +0000 UTC m=+1353.784390591"
Oct 11 07:16:10 crc kubenswrapper[5055]: I1011 07:16:10.033542 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.9543716340000001 podStartE2EDuration="5.033525256s" podCreationTimestamp="2025-10-11 07:16:05 +0000 UTC" firstStartedPulling="2025-10-11 07:16:06.375367061 +0000 UTC m=+1350.149640868" lastFinishedPulling="2025-10-11 07:16:09.454520683 +0000 UTC m=+1353.228794490" observedRunningTime="2025-10-11 07:16:10.023557234 +0000 UTC m=+1353.797831051" watchObservedRunningTime="2025-10-11 07:16:10.033525256 +0000 UTC m=+1353.807799063"
Oct 11 07:16:10 crc kubenswrapper[5055]: I1011 07:16:10.048204 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" podStartSLOduration=5.04818655 podStartE2EDuration="5.04818655s" podCreationTimestamp="2025-10-11 07:16:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:10.043691583 +0000 UTC m=+1353.817965400" watchObservedRunningTime="2025-10-11 07:16:10.04818655 +0000 UTC m=+1353.822460347"
Oct 11 07:16:10 crc kubenswrapper[5055]: I1011 07:16:10.150750 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Oct 11 07:16:10 crc kubenswrapper[5055]: I1011 07:16:10.678551 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.009157 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-log" containerID="cri-o://fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71" gracePeriod=30
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.009177 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-metadata" containerID="cri-o://409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec" gracePeriod=30
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.010788 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerStarted","Data":"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466"}
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.010813 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerStarted","Data":"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"}
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.031419 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.953605771 podStartE2EDuration="6.031401694s" podCreationTimestamp="2025-10-11 07:16:05 +0000 UTC" firstStartedPulling="2025-10-11 07:16:06.381915236 +0000 UTC m=+1350.156189043" lastFinishedPulling="2025-10-11 07:16:09.459711159 +0000 UTC m=+1353.233984966" observedRunningTime="2025-10-11 07:16:11.025497877 +0000 UTC m=+1354.799771684" watchObservedRunningTime="2025-10-11 07:16:11.031401694 +0000 UTC m=+1354.805675501"
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.051226 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.771650665 podStartE2EDuration="7.051205484s" podCreationTimestamp="2025-10-11 07:16:04 +0000 UTC" firstStartedPulling="2025-10-11 07:16:06.181452687 +0000 UTC m=+1349.955726484" lastFinishedPulling="2025-10-11 07:16:09.461007496 +0000 UTC m=+1353.235281303" observedRunningTime="2025-10-11 07:16:11.048722164 +0000 UTC m=+1354.822995981" watchObservedRunningTime="2025-10-11 07:16:11.051205484 +0000 UTC m=+1354.825479291"
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.597043 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723147 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f28b8\" (UniqueName: \"kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8\") pod \"2d603548-a611-45f4-9afc-a12102f1910e\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") "
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723224 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data\") pod \"2d603548-a611-45f4-9afc-a12102f1910e\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") "
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723261 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs\") pod \"2d603548-a611-45f4-9afc-a12102f1910e\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") "
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723296 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle\") pod \"2d603548-a611-45f4-9afc-a12102f1910e\" (UID: \"2d603548-a611-45f4-9afc-a12102f1910e\") "
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723708 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs" (OuterVolumeSpecName: "logs") pod "2d603548-a611-45f4-9afc-a12102f1910e" (UID: "2d603548-a611-45f4-9afc-a12102f1910e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.723970 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d603548-a611-45f4-9afc-a12102f1910e-logs\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.728820 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8" (OuterVolumeSpecName: "kube-api-access-f28b8") pod "2d603548-a611-45f4-9afc-a12102f1910e" (UID: "2d603548-a611-45f4-9afc-a12102f1910e"). InnerVolumeSpecName "kube-api-access-f28b8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.754504 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data" (OuterVolumeSpecName: "config-data") pod "2d603548-a611-45f4-9afc-a12102f1910e" (UID: "2d603548-a611-45f4-9afc-a12102f1910e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.754829 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2d603548-a611-45f4-9afc-a12102f1910e" (UID: "2d603548-a611-45f4-9afc-a12102f1910e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.825807 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.825846 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f28b8\" (UniqueName: \"kubernetes.io/projected/2d603548-a611-45f4-9afc-a12102f1910e-kube-api-access-f28b8\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:11 crc kubenswrapper[5055]: I1011 07:16:11.825860 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d603548-a611-45f4-9afc-a12102f1910e-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.017227 5055 generic.go:334] "Generic (PLEG): container finished" podID="2d603548-a611-45f4-9afc-a12102f1910e" containerID="409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec" exitCode=0
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.017253 5055 generic.go:334] "Generic (PLEG): container finished" podID="2d603548-a611-45f4-9afc-a12102f1910e" containerID="fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71" exitCode=143
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.018122 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.023985 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerDied","Data":"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"}
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.024174 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerDied","Data":"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"}
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.024240 5055 scope.go:117] "RemoveContainer" containerID="409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.024277 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2d603548-a611-45f4-9afc-a12102f1910e","Type":"ContainerDied","Data":"d23ceffcec6e16461dcf554e74509f4c6466db1481ae8413c9ba48b272972193"}
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.053416 5055 scope.go:117] "RemoveContainer" containerID="fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.053567 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.068178 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.078038 5055 scope.go:117] "RemoveContainer" containerID="409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.088964 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:12 crc kubenswrapper[5055]: E1011 07:16:12.089360 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-metadata"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.089380 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-metadata"
Oct 11 07:16:12 crc kubenswrapper[5055]: E1011 07:16:12.089425 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-log"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.089436 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-log"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.089667 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-metadata"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.089687 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d603548-a611-45f4-9afc-a12102f1910e" containerName="nova-metadata-log"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.090552 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.090641 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.092558 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.092758 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Oct 11 07:16:12 crc kubenswrapper[5055]: E1011 07:16:12.109150 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec\": container with ID starting with 409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec not found: ID does not exist" containerID="409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.109198 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"} err="failed to get container status \"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec\": rpc error: code = NotFound desc = could not find container \"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec\": container with ID starting with 409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec not found: ID does not exist"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.109227 5055 scope.go:117] "RemoveContainer" containerID="fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"
Oct 11 07:16:12 crc kubenswrapper[5055]: E1011 07:16:12.109518 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71\": container with ID starting with fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71 not found: ID does not exist" containerID="fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.109541 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"} err="failed to get container status \"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71\": rpc error: code = NotFound desc = could not find container \"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71\": container with ID starting with fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71 not found: ID does not exist"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.109556 5055 scope.go:117] "RemoveContainer" containerID="409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.111562 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec"} err="failed to get container status \"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec\": rpc error: code = NotFound desc = could not find container \"409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec\": container with ID starting with 409bc6cdb6ab3349b51695d194f87afd8f5bc2f39f2cd625ce9600959d9f3aec not found: ID does not exist"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.111618 5055 scope.go:117] "RemoveContainer" containerID="fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.111955 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71"} err="failed to get container status \"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71\": rpc error: code = NotFound desc = could not find container \"fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71\": container with ID starting with fe450ce9893bb5ea37a641c68e78a791c33c02d5334f5f6adbedaef6e2667e71 not found: ID does not exist"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.233464 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2mcw\" (UniqueName: \"kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.233739 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.233830 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.233858 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.233906 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335101 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335213 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2mcw\" (UniqueName: \"kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335240 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335293 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335318 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.335793 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.339193 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.339849 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.350849 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.357297 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2mcw\" (UniqueName: \"kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw\") pod \"nova-metadata-0\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.426701 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 11 07:16:12 crc kubenswrapper[5055]: I1011 07:16:12.843719 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:12 crc kubenswrapper[5055]: W1011 07:16:12.853254 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa1fb477_16ef_4bd5_85f2_66ff215f8ea9.slice/crio-17ce7f67952d76eb17278b539897acee53c8b172d0d22d667b8829ca84b4b66e WatchSource:0}: Error finding container 17ce7f67952d76eb17278b539897acee53c8b172d0d22d667b8829ca84b4b66e: Status 404 returned error can't find the container with id 17ce7f67952d76eb17278b539897acee53c8b172d0d22d667b8829ca84b4b66e
Oct 11 07:16:13 crc kubenswrapper[5055]: I1011 07:16:13.017888 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d603548-a611-45f4-9afc-a12102f1910e" path="/var/lib/kubelet/pods/2d603548-a611-45f4-9afc-a12102f1910e/volumes"
Oct 11 07:16:13 crc kubenswrapper[5055]: I1011 07:16:13.049757 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerStarted","Data":"17ce7f67952d76eb17278b539897acee53c8b172d0d22d667b8829ca84b4b66e"}
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.059063 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerStarted","Data":"26b5fa94e25ae25c7c39e4e637b9725316d70bb994d96e0bd933c7a55fbab585"}
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.059410 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerStarted","Data":"521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673"}
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.061596 5055 generic.go:334] "Generic (PLEG): container finished" podID="75e6c9e3-4bce-4031-94a5-71396f262b51" containerID="e924a4a0bf51b244d095f694add529a8eefd25c7b9d6ffbe0297fb57e1f80c20" exitCode=0
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.061649 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xbzvs" event={"ID":"75e6c9e3-4bce-4031-94a5-71396f262b51","Type":"ContainerDied","Data":"e924a4a0bf51b244d095f694add529a8eefd25c7b9d6ffbe0297fb57e1f80c20"}
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.063574 5055 generic.go:334] "Generic (PLEG): container finished" podID="c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" containerID="59f1f8256c40f1b0fc187bbfdda406a2648bc79c5276cfc25ef7d0febc6bb0f4" exitCode=0
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.063605 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh6wt" event={"ID":"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd","Type":"ContainerDied","Data":"59f1f8256c40f1b0fc187bbfdda406a2648bc79c5276cfc25ef7d0febc6bb0f4"}
Oct 11 07:16:14 crc kubenswrapper[5055]: I1011 07:16:14.090495 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.090476508 podStartE2EDuration="2.090476508s" podCreationTimestamp="2025-10-11 07:16:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:14.077276985 +0000 UTC m=+1357.851550792" watchObservedRunningTime="2025-10-11 07:16:14.090476508 +0000 UTC m=+1357.864750315"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.150175 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.184144 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.508466 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.514635 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh6wt"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603645 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle\") pod \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603730 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle\") pod \"75e6c9e3-4bce-4031-94a5-71396f262b51\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603803 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srh28\" (UniqueName: \"kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28\") pod \"75e6c9e3-4bce-4031-94a5-71396f262b51\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603855 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data\") pod \"75e6c9e3-4bce-4031-94a5-71396f262b51\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603912 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rt6g\" (UniqueName: \"kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g\") pod \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603936 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data\") pod \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.603968 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts\") pod \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\" (UID: \"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.604002 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts\") pod \"75e6c9e3-4bce-4031-94a5-71396f262b51\" (UID: \"75e6c9e3-4bce-4031-94a5-71396f262b51\") "
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.609288 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g" (OuterVolumeSpecName: "kube-api-access-6rt6g") pod "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" (UID: "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd"). InnerVolumeSpecName "kube-api-access-6rt6g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.610337 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts" (OuterVolumeSpecName: "scripts") pod "75e6c9e3-4bce-4031-94a5-71396f262b51" (UID: "75e6c9e3-4bce-4031-94a5-71396f262b51"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.610964 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts" (OuterVolumeSpecName: "scripts") pod "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" (UID: "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.617561 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28" (OuterVolumeSpecName: "kube-api-access-srh28") pod "75e6c9e3-4bce-4031-94a5-71396f262b51" (UID: "75e6c9e3-4bce-4031-94a5-71396f262b51"). InnerVolumeSpecName "kube-api-access-srh28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.631701 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75e6c9e3-4bce-4031-94a5-71396f262b51" (UID: "75e6c9e3-4bce-4031-94a5-71396f262b51"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.633046 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" (UID: "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.634616 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data" (OuterVolumeSpecName: "config-data") pod "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" (UID: "c13685b1-481e-4a5b-9a66-94c6c9fc0fcd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.646076 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data" (OuterVolumeSpecName: "config-data") pod "75e6c9e3-4bce-4031-94a5-71396f262b51" (UID: "75e6c9e3-4bce-4031-94a5-71396f262b51"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.655724 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.655787 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707352 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rt6g\" (UniqueName: \"kubernetes.io/projected/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-kube-api-access-6rt6g\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707384 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707393 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707403 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707411 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707420 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707430 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srh28\" (UniqueName: \"kubernetes.io/projected/75e6c9e3-4bce-4031-94a5-71396f262b51-kube-api-access-srh28\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.707438 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75e6c9e3-4bce-4031-94a5-71396f262b51-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.718348 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n"
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.773164 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"]
Oct 11 07:16:15 crc kubenswrapper[5055]: I1011 07:16:15.773430 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="dnsmasq-dns" containerID="cri-o://aac783028ec9596d55d8bef35fe4aa03e5b126d8adc851cd8aea1aa6f04ad467" gracePeriod=10
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.082956 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bh6wt"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.082994 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bh6wt" event={"ID":"c13685b1-481e-4a5b-9a66-94c6c9fc0fcd","Type":"ContainerDied","Data":"1a9b0620b5cae745d1cc8611c8c773d2d1556fde915bb728c45d20eee34bcb3f"}
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.084280 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a9b0620b5cae745d1cc8611c8c773d2d1556fde915bb728c45d20eee34bcb3f"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.088509 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-xbzvs" event={"ID":"75e6c9e3-4bce-4031-94a5-71396f262b51","Type":"ContainerDied","Data":"0f9e9d4e54c63656ab60d3522d1c1c4be70d52337889ecf63f7264097beea113"}
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.088573 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f9e9d4e54c63656ab60d3522d1c1c4be70d52337889ecf63f7264097beea113"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.088540 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-xbzvs"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.092385 5055 generic.go:334] "Generic (PLEG): container finished" podID="c13d308b-a780-44eb-8574-0590dc8316a8" containerID="aac783028ec9596d55d8bef35fe4aa03e5b126d8adc851cd8aea1aa6f04ad467" exitCode=0
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.093674 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" event={"ID":"c13d308b-a780-44eb-8574-0590dc8316a8","Type":"ContainerDied","Data":"aac783028ec9596d55d8bef35fe4aa03e5b126d8adc851cd8aea1aa6f04ad467"}
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.135710 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.176015 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bd785c49-64j5m"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.189729 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 11 07:16:16 crc kubenswrapper[5055]: E1011 07:16:16.190102 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="init"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190122 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="init"
Oct 11 07:16:16 crc kubenswrapper[5055]: E1011 07:16:16.190147 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75e6c9e3-4bce-4031-94a5-71396f262b51" containerName="nova-cell1-conductor-db-sync"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190154 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="75e6c9e3-4bce-4031-94a5-71396f262b51" containerName="nova-cell1-conductor-db-sync"
Oct 11 07:16:16 crc kubenswrapper[5055]: E1011 07:16:16.190166 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" containerName="nova-manage"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190175 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" containerName="nova-manage"
Oct 11 07:16:16 crc kubenswrapper[5055]: E1011 07:16:16.190185 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="dnsmasq-dns"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190192 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="dnsmasq-dns"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190369 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="75e6c9e3-4bce-4031-94a5-71396f262b51" containerName="nova-cell1-conductor-db-sync"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190391 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" containerName="dnsmasq-dns"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.190406 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" containerName="nova-manage"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.191051 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.192835 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.215030 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.332044 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.332304 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-log" containerID="cri-o://4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959" gracePeriod=30
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.332699 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-api" containerID="cri-o://429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466" gracePeriod=30
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.343562 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": EOF"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.343697 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.183:8774/\": EOF"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.346245 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.346594 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-log" containerID="cri-o://521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673" gracePeriod=30
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.346734 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-metadata" containerID="cri-o://26b5fa94e25ae25c7c39e4e637b9725316d70bb994d96e0bd933c7a55fbab585" gracePeriod=30
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348272 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348368 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348448 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348557 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348593 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.348700 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4rdc\" (UniqueName: \"kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc\") pod \"c13d308b-a780-44eb-8574-0590dc8316a8\" (UID: \"c13d308b-a780-44eb-8574-0590dc8316a8\") "
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.349100 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttqs6\" (UniqueName: \"kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.349151 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.349295 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0"
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.355578 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc" (OuterVolumeSpecName: "kube-api-access-z4rdc") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "kube-api-access-z4rdc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.411938 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "dns-swift-storage-0".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.414452 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.437853 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config" (OuterVolumeSpecName: "config") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.438986 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.457241 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.458550 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttqs6\" (UniqueName: \"kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.458686 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.458982 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.459150 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4rdc\" (UniqueName: \"kubernetes.io/projected/c13d308b-a780-44eb-8574-0590dc8316a8-kube-api-access-z4rdc\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.459243 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.459397 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-dns-swift-storage-0\") on node \"crc\" DevicePath 
\"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.459481 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.463135 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.473484 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c13d308b-a780-44eb-8574-0590dc8316a8" (UID: "c13d308b-a780-44eb-8574-0590dc8316a8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.473978 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: E1011 07:16:16.475068 5055 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa1fb477_16ef_4bd5_85f2_66ff215f8ea9.slice/crio-521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673.scope\": RecentStats: unable to find data in memory cache]" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.478267 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttqs6\" (UniqueName: \"kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6\") pod \"nova-cell1-conductor-0\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.536798 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.561303 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c13d308b-a780-44eb-8574-0590dc8316a8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.664482 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:16 crc kubenswrapper[5055]: I1011 07:16:16.957648 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.124233 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" event={"ID":"c13d308b-a780-44eb-8574-0590dc8316a8","Type":"ContainerDied","Data":"483b092da9ad9cba31f808b1254ee2f56bb07dfc9273226ed65c44250a5f8331"} Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.124281 5055 scope.go:117] "RemoveContainer" containerID="aac783028ec9596d55d8bef35fe4aa03e5b126d8adc851cd8aea1aa6f04ad467" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.124432 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bd785c49-64j5m" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.129138 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6a52d356-2098-4a61-be67-5519a2d2b78f","Type":"ContainerStarted","Data":"e743b4fba078ddd6b6a47cf961b951770d0460a80991f775d631a8a09c51cbf1"} Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.141148 5055 generic.go:334] "Generic (PLEG): container finished" podID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerID="26b5fa94e25ae25c7c39e4e637b9725316d70bb994d96e0bd933c7a55fbab585" exitCode=0 Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.141189 5055 generic.go:334] "Generic (PLEG): container finished" podID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerID="521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673" exitCode=143 Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.141264 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerDied","Data":"26b5fa94e25ae25c7c39e4e637b9725316d70bb994d96e0bd933c7a55fbab585"} Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.141294 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerDied","Data":"521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673"} Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.152556 5055 generic.go:334] "Generic (PLEG): container finished" podID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerID="4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959" exitCode=143 Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.153369 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerDied","Data":"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959"} Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.172335 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"] Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.173191 5055 scope.go:117] 
"RemoveContainer" containerID="fff0f158566c1b8c2fe1c4a0cf07e68ef221a4c5c94e05c7eb576b98e99d98af" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.180483 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bd785c49-64j5m"] Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.234349 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.410573 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle\") pod \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.410654 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs\") pod \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.410692 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs\") pod \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.410724 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2mcw\" (UniqueName: \"kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw\") pod \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.410848 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data\") pod \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\" (UID: \"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9\") " Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.412161 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs" (OuterVolumeSpecName: "logs") pod "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" (UID: "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.417021 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw" (OuterVolumeSpecName: "kube-api-access-m2mcw") pod "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" (UID: "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9"). InnerVolumeSpecName "kube-api-access-m2mcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.445922 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data" (OuterVolumeSpecName: "config-data") pod "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" (UID: "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.449477 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" (UID: "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.470023 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" (UID: "fa1fb477-16ef-4bd5-85f2-66ff215f8ea9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.515630 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.515665 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.515681 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.515716 5055 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:17 crc kubenswrapper[5055]: I1011 07:16:17.515731 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2mcw\" (UniqueName: \"kubernetes.io/projected/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9-kube-api-access-m2mcw\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.028174 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.163043 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6a52d356-2098-4a61-be67-5519a2d2b78f","Type":"ContainerStarted","Data":"fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82"} Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.163178 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.167532 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.167907 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"fa1fb477-16ef-4bd5-85f2-66ff215f8ea9","Type":"ContainerDied","Data":"17ce7f67952d76eb17278b539897acee53c8b172d0d22d667b8829ca84b4b66e"} Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.168202 5055 scope.go:117] "RemoveContainer" containerID="26b5fa94e25ae25c7c39e4e637b9725316d70bb994d96e0bd933c7a55fbab585" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.169232 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerName="nova-scheduler-scheduler" containerID="cri-o://26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" gracePeriod=30 Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.202465 5055 scope.go:117] "RemoveContainer" containerID="521939f34b4938db63601bec086d69eec089b7529d44417134282b40f2a1b673" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.219520 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.21950154 podStartE2EDuration="2.21950154s" podCreationTimestamp="2025-10-11 07:16:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:18.185705484 +0000 UTC m=+1361.959979291" watchObservedRunningTime="2025-10-11 07:16:18.21950154 +0000 UTC m=+1361.993775347" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.220262 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.260383 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.275036 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:18 crc kubenswrapper[5055]: E1011 07:16:18.275582 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-log" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.275612 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-log" Oct 11 07:16:18 crc kubenswrapper[5055]: E1011 07:16:18.275643 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-metadata" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.275654 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-metadata" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.291015 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-log" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.291083 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" containerName="nova-metadata-metadata" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.306430 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.313069 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.313276 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.319049 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.435865 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.435912 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.435971 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.435991 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.436017 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z97tb\" (UniqueName: \"kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.538151 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.538204 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.538258 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " 
pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.538279 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.538306 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z97tb\" (UniqueName: \"kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.539285 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.543519 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.545789 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.554388 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.556299 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z97tb\" (UniqueName: \"kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb\") pod \"nova-metadata-0\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " pod="openstack/nova-metadata-0" Oct 11 07:16:18 crc kubenswrapper[5055]: I1011 07:16:18.629337 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:16:19 crc kubenswrapper[5055]: I1011 07:16:19.005452 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c13d308b-a780-44eb-8574-0590dc8316a8" path="/var/lib/kubelet/pods/c13d308b-a780-44eb-8574-0590dc8316a8/volumes" Oct 11 07:16:19 crc kubenswrapper[5055]: I1011 07:16:19.007468 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa1fb477-16ef-4bd5-85f2-66ff215f8ea9" path="/var/lib/kubelet/pods/fa1fb477-16ef-4bd5-85f2-66ff215f8ea9/volumes" Oct 11 07:16:19 crc kubenswrapper[5055]: I1011 07:16:19.087909 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:16:19 crc kubenswrapper[5055]: I1011 07:16:19.180793 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerStarted","Data":"770597d409b69d117ab5e6464d58d9b706549b315cabe46e4149141cbff3985c"} Oct 11 07:16:20 crc kubenswrapper[5055]: E1011 07:16:20.158324 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:16:20 crc kubenswrapper[5055]: E1011 07:16:20.161414 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:16:20 crc kubenswrapper[5055]: E1011 07:16:20.164259 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:16:20 crc kubenswrapper[5055]: E1011 07:16:20.164379 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerName="nova-scheduler-scheduler" Oct 11 07:16:20 crc kubenswrapper[5055]: I1011 07:16:20.200232 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerStarted","Data":"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614"} Oct 11 07:16:20 crc kubenswrapper[5055]: I1011 07:16:20.200288 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerStarted","Data":"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7"} Oct 11 07:16:20 crc kubenswrapper[5055]: I1011 07:16:20.237975 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.237958417 podStartE2EDuration="2.237958417s" podCreationTimestamp="2025-10-11 07:16:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-10-11 07:16:20.235187179 +0000 UTC m=+1364.009460996" watchObservedRunningTime="2025-10-11 07:16:20.237958417 +0000 UTC m=+1364.012232224" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.209235 5055 generic.go:334] "Generic (PLEG): container finished" podID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerID="26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" exitCode=0 Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.209936 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c330ab35-8c68-44b0-91a5-e7d58920a253","Type":"ContainerDied","Data":"26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759"} Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.390217 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.404821 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data\") pod \"c330ab35-8c68-44b0-91a5-e7d58920a253\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.404953 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle\") pod \"c330ab35-8c68-44b0-91a5-e7d58920a253\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.405132 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxl55\" (UniqueName: \"kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55\") pod \"c330ab35-8c68-44b0-91a5-e7d58920a253\" (UID: \"c330ab35-8c68-44b0-91a5-e7d58920a253\") " Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.411424 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55" (OuterVolumeSpecName: "kube-api-access-nxl55") pod "c330ab35-8c68-44b0-91a5-e7d58920a253" (UID: "c330ab35-8c68-44b0-91a5-e7d58920a253"). InnerVolumeSpecName "kube-api-access-nxl55". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.438722 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c330ab35-8c68-44b0-91a5-e7d58920a253" (UID: "c330ab35-8c68-44b0-91a5-e7d58920a253"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.440161 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data" (OuterVolumeSpecName: "config-data") pod "c330ab35-8c68-44b0-91a5-e7d58920a253" (UID: "c330ab35-8c68-44b0-91a5-e7d58920a253"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.507739 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxl55\" (UniqueName: \"kubernetes.io/projected/c330ab35-8c68-44b0-91a5-e7d58920a253-kube-api-access-nxl55\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.507793 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.507806 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330ab35-8c68-44b0-91a5-e7d58920a253-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.632163 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:21 crc kubenswrapper[5055]: I1011 07:16:21.632369 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="cae844be-3146-4234-b3f8-6c3aba5defe8" containerName="kube-state-metrics" containerID="cri-o://4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71" gracePeriod=30 Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.099724 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.119395 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pcd4\" (UniqueName: \"kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4\") pod \"cae844be-3146-4234-b3f8-6c3aba5defe8\" (UID: \"cae844be-3146-4234-b3f8-6c3aba5defe8\") " Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.124024 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4" (OuterVolumeSpecName: "kube-api-access-7pcd4") pod "cae844be-3146-4234-b3f8-6c3aba5defe8" (UID: "cae844be-3146-4234-b3f8-6c3aba5defe8"). InnerVolumeSpecName "kube-api-access-7pcd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.222794 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pcd4\" (UniqueName: \"kubernetes.io/projected/cae844be-3146-4234-b3f8-6c3aba5defe8-kube-api-access-7pcd4\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.241061 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"c330ab35-8c68-44b0-91a5-e7d58920a253","Type":"ContainerDied","Data":"a7a566c9f12536d073937b9c5bcc571acadca5f77fb46cce827dd0ebc3e6b8db"} Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.241122 5055 scope.go:117] "RemoveContainer" containerID="26e2abdf5454e4568f480c1b6f0ba4535962d3ba9135adeb1158a0fd9c517759" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.241127 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.243046 5055 generic.go:334] "Generic (PLEG): container finished" podID="cae844be-3146-4234-b3f8-6c3aba5defe8" containerID="4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71" exitCode=2 Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.243154 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cae844be-3146-4234-b3f8-6c3aba5defe8","Type":"ContainerDied","Data":"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71"} Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.243251 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"cae844be-3146-4234-b3f8-6c3aba5defe8","Type":"ContainerDied","Data":"fe3c6023a561eb422be90545fc77014f830803003c6fcd569d5ca86b65d484cc"} Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.243393 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.280817 5055 scope.go:117] "RemoveContainer" containerID="4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.280982 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.290091 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.301892 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.309292 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.317537 5055 scope.go:117] "RemoveContainer" containerID="4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71" Oct 11 07:16:22 crc kubenswrapper[5055]: E1011 07:16:22.317986 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71\": container with ID starting with 4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71 not found: ID does not exist" containerID="4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.318021 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71"} err="failed to get container status \"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71\": rpc error: code = NotFound desc = could not find container \"4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71\": container with ID starting with 4d315b6dea273ee157466cbc1b38c29f7c8fa1c21b574cd63842a2e7a3f4eb71 not found: ID does not exist" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.318868 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: E1011 07:16:22.319354 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae844be-3146-4234-b3f8-6c3aba5defe8" containerName="kube-state-metrics" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 
07:16:22.319448 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae844be-3146-4234-b3f8-6c3aba5defe8" containerName="kube-state-metrics" Oct 11 07:16:22 crc kubenswrapper[5055]: E1011 07:16:22.319535 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerName="nova-scheduler-scheduler" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.319588 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerName="nova-scheduler-scheduler" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.319825 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae844be-3146-4234-b3f8-6c3aba5defe8" containerName="kube-state-metrics" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.319894 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" containerName="nova-scheduler-scheduler" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.320528 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.323935 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.327458 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.332124 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.334386 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.334754 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.351287 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.359479 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.426826 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfm6l\" (UniqueName: \"kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.426951 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.427161 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.528984 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529029 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529093 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529153 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfm6l\" (UniqueName: \"kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529224 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxnp9\" (UniqueName: \"kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529249 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.529317 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.541531 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.542427 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.550598 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfm6l\" 
(UniqueName: \"kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l\") pod \"nova-scheduler-0\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") " pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.630802 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.630928 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.630978 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.631035 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxnp9\" (UniqueName: \"kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.634484 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.634543 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.636204 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.639526 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.653245 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxnp9\" (UniqueName: \"kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9\") pod \"kube-state-metrics-0\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " pod="openstack/kube-state-metrics-0" Oct 11 07:16:22 crc kubenswrapper[5055]: I1011 07:16:22.952016 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.006375 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c330ab35-8c68-44b0-91a5-e7d58920a253" path="/var/lib/kubelet/pods/c330ab35-8c68-44b0-91a5-e7d58920a253/volumes" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.007567 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cae844be-3146-4234-b3f8-6c3aba5defe8" path="/var/lib/kubelet/pods/cae844be-3146-4234-b3f8-6c3aba5defe8/volumes" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.059654 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:16:23 crc kubenswrapper[5055]: W1011 07:16:23.067683 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4add32bf_085d_4ed0_9e73_688f3c8bd8b9.slice/crio-40e4298b124acf760e3f32009d2bf28b0e61d5f30c6fbd0c2c8d5e4c1c5ac7a8 WatchSource:0}: Error finding container 40e4298b124acf760e3f32009d2bf28b0e61d5f30c6fbd0c2c8d5e4c1c5ac7a8: Status 404 returned error can't find the container with id 40e4298b124acf760e3f32009d2bf28b0e61d5f30c6fbd0c2c8d5e4c1c5ac7a8 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.257818 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.259374 5055 generic.go:334] "Generic (PLEG): container finished" podID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerID="429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466" exitCode=0 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.259437 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerDied","Data":"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466"} Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.259489 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd","Type":"ContainerDied","Data":"a04fecef429d59b89d249ca156fc6c70d2a64e9a6fed100268433d743454d42a"} Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.259507 5055 scope.go:117] "RemoveContainer" containerID="429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.263233 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4add32bf-085d-4ed0-9e73-688f3c8bd8b9","Type":"ContainerStarted","Data":"40e4298b124acf760e3f32009d2bf28b0e61d5f30c6fbd0c2c8d5e4c1c5ac7a8"} Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.298861 5055 scope.go:117] "RemoveContainer" containerID="4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.322096 5055 scope.go:117] "RemoveContainer" containerID="429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466" Oct 11 07:16:23 crc kubenswrapper[5055]: E1011 07:16:23.322929 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466\": container with ID starting with 429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466 not found: ID does not exist" containerID="429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.322961 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466"} err="failed to get container status \"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466\": rpc error: code = NotFound desc = could not find container \"429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466\": container with ID starting with 429129fbc12ca3fcba2f88e902e64ca87bfb6df566ec043ece4f8a07097b8466 not found: ID does not exist" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.322984 5055 scope.go:117] "RemoveContainer" containerID="4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959" Oct 11 07:16:23 crc kubenswrapper[5055]: E1011 07:16:23.323829 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959\": container with ID starting with 4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959 not found: ID does not exist" containerID="4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.323917 5055 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959"} err="failed to get container status \"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959\": rpc error: code = NotFound desc = could not find container \"4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959\": container with ID starting with 4009fdfa9244b6b2c3e9c55f9e2479a86b2457e65630baf744707264b98a0959 not found: ID does not exist" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.429235 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.446344 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data\") pod \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.446390 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle\") pod \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.446456 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s86v\" (UniqueName: \"kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v\") pod \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.446480 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs\") pod \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\" (UID: \"3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd\") " Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.447182 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs" (OuterVolumeSpecName: "logs") pod "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" (UID: "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.452163 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v" (OuterVolumeSpecName: "kube-api-access-6s86v") pod "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" (UID: "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd"). InnerVolumeSpecName "kube-api-access-6s86v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.472177 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data" (OuterVolumeSpecName: "config-data") pod "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" (UID: "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.478295 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" (UID: "3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.518880 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.519213 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-central-agent" containerID="cri-o://99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f" gracePeriod=30 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.519723 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="proxy-httpd" containerID="cri-o://d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b" gracePeriod=30 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.519848 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="sg-core" containerID="cri-o://81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70" gracePeriod=30 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.519890 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-notification-agent" containerID="cri-o://d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1" gracePeriod=30 Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.549122 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.549165 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.549181 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s86v\" (UniqueName: \"kubernetes.io/projected/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-kube-api-access-6s86v\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.549192 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.629443 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 07:16:23 crc kubenswrapper[5055]: I1011 07:16:23.629495 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.273931 5055 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/nova-scheduler-0" event={"ID":"4add32bf-085d-4ed0-9e73-688f3c8bd8b9","Type":"ContainerStarted","Data":"ec2facf50e444faa12052207cfdadf93f534e702db147e2422b0ca62c80cd3f5"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277698 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerID="d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b" exitCode=0 Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277730 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerID="81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70" exitCode=2 Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277741 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerID="99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f" exitCode=0 Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277795 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerDied","Data":"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277865 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerDied","Data":"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.277946 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerDied","Data":"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.280564 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af2ab03e-3011-4d6c-bbd5-ac5453d7785d","Type":"ContainerStarted","Data":"2e4ad5facb5f08bffeaad25b16cd84fad4909f52d97b67679d95970d047e100f"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.280645 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af2ab03e-3011-4d6c-bbd5-ac5453d7785d","Type":"ContainerStarted","Data":"4225837af8e9089ccaa3d6d36348d97283f9a841dd3565bf33d45126d9944173"} Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.280695 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.282641 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.300872 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.300850647 podStartE2EDuration="2.300850647s" podCreationTimestamp="2025-10-11 07:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:24.288507408 +0000 UTC m=+1368.062781215" watchObservedRunningTime="2025-10-11 07:16:24.300850647 +0000 UTC m=+1368.075124454" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.315620 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.890258567 podStartE2EDuration="2.315603275s" podCreationTimestamp="2025-10-11 07:16:22 +0000 UTC" firstStartedPulling="2025-10-11 07:16:23.433374677 +0000 UTC m=+1367.207648484" lastFinishedPulling="2025-10-11 07:16:23.858719365 +0000 UTC m=+1367.632993192" observedRunningTime="2025-10-11 07:16:24.312106906 +0000 UTC m=+1368.086380743" watchObservedRunningTime="2025-10-11 07:16:24.315603275 +0000 UTC m=+1368.089877082" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.332987 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.340303 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.361686 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:24 crc kubenswrapper[5055]: E1011 07:16:24.362520 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-log" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.362549 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-log" Oct 11 07:16:24 crc kubenswrapper[5055]: E1011 07:16:24.362599 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-api" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.362614 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-api" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.363699 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-api" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.363746 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" containerName="nova-api-log" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.366036 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.373388 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.380532 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.467751 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.467906 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p6mz\" (UniqueName: \"kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.467997 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.468035 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.569744 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.569821 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.569881 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.569929 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p6mz\" (UniqueName: \"kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.570863 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " 
pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.588320 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.589613 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.595854 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p6mz\" (UniqueName: \"kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz\") pod \"nova-api-0\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") " pod="openstack/nova-api-0" Oct 11 07:16:24 crc kubenswrapper[5055]: I1011 07:16:24.691543 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:25 crc kubenswrapper[5055]: I1011 07:16:25.006686 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd" path="/var/lib/kubelet/pods/3f24e7cc-b698-4558-bc8f-e9ee1e3e0fcd/volumes" Oct 11 07:16:25 crc kubenswrapper[5055]: I1011 07:16:25.152343 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:25 crc kubenswrapper[5055]: W1011 07:16:25.159002 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2825fd40_7391_4d98_be0b_7ee89947cbe8.slice/crio-ca60e8941a601cf4a3ea5964db06ee10798629f4c3561b5b6d65470625dbac1e WatchSource:0}: Error finding container ca60e8941a601cf4a3ea5964db06ee10798629f4c3561b5b6d65470625dbac1e: Status 404 returned error can't find the container with id ca60e8941a601cf4a3ea5964db06ee10798629f4c3561b5b6d65470625dbac1e Oct 11 07:16:25 crc kubenswrapper[5055]: I1011 07:16:25.292667 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerStarted","Data":"ca60e8941a601cf4a3ea5964db06ee10798629f4c3561b5b6d65470625dbac1e"} Oct 11 07:16:26 crc kubenswrapper[5055]: I1011 07:16:26.302040 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerStarted","Data":"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0"} Oct 11 07:16:26 crc kubenswrapper[5055]: I1011 07:16:26.302081 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerStarted","Data":"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3"} Oct 11 07:16:26 crc kubenswrapper[5055]: I1011 07:16:26.325463 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.325447068 podStartE2EDuration="2.325447068s" podCreationTimestamp="2025-10-11 07:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:26.31737821 +0000 UTC m=+1370.091652027" 
watchObservedRunningTime="2025-10-11 07:16:26.325447068 +0000 UTC m=+1370.099720875" Oct 11 07:16:26 crc kubenswrapper[5055]: I1011 07:16:26.572682 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Oct 11 07:16:26 crc kubenswrapper[5055]: I1011 07:16:26.973725 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.116669 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.116831 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.116861 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.116945 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.116991 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.117051 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.117087 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vc8z\" (UniqueName: \"kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z\") pod \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\" (UID: \"3d3cc469-1b87-4aa6-ae06-a9101f3e1856\") " Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.117444 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.117815 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.127748 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts" (OuterVolumeSpecName: "scripts") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.134144 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z" (OuterVolumeSpecName: "kube-api-access-2vc8z") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "kube-api-access-2vc8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.161446 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.217159 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219471 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219496 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219505 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219514 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219522 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.219530 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vc8z\" (UniqueName: \"kubernetes.io/projected/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-kube-api-access-2vc8z\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.230305 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data" (OuterVolumeSpecName: "config-data") pod "3d3cc469-1b87-4aa6-ae06-a9101f3e1856" (UID: "3d3cc469-1b87-4aa6-ae06-a9101f3e1856"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.326185 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d3cc469-1b87-4aa6-ae06-a9101f3e1856-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.331155 5055 generic.go:334] "Generic (PLEG): container finished" podID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerID="d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1" exitCode=0 Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.331203 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerDied","Data":"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1"} Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.331264 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.331284 5055 scope.go:117] "RemoveContainer" containerID="d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.331272 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3d3cc469-1b87-4aa6-ae06-a9101f3e1856","Type":"ContainerDied","Data":"7792c44896a50c744e498f501f06983fe39ee71bf7d4ec7f9d2cf18b7ef4e55d"} Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.368079 5055 scope.go:117] "RemoveContainer" containerID="81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.383627 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.399458 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.405554 5055 scope.go:117] "RemoveContainer" containerID="d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411202 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.411609 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-central-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411632 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-central-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.411648 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-notification-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411655 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-notification-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.411677 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="sg-core" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411683 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="sg-core" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.411703 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="proxy-httpd" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411709 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="proxy-httpd" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411953 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="sg-core" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411969 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="ceilometer-notification-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.411987 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" 
containerName="ceilometer-central-agent" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.412006 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" containerName="proxy-httpd" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.413801 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.415464 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.416980 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.417259 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.437091 5055 scope.go:117] "RemoveContainer" containerID="99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.445607 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.461321 5055 scope.go:117] "RemoveContainer" containerID="d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.461759 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b\": container with ID starting with d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b not found: ID does not exist" containerID="d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.461815 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b"} err="failed to get container status \"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b\": rpc error: code = NotFound desc = could not find container \"d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b\": container with ID starting with d8afe9a5c979f3f1eb7ab18b882a35b01b602d9431a151fae8144443ca99ff0b not found: ID does not exist" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.461841 5055 scope.go:117] "RemoveContainer" containerID="81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.462278 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70\": container with ID starting with 81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70 not found: ID does not exist" containerID="81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.462316 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70"} err="failed to get container status \"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70\": rpc error: code = NotFound desc = could not find container 
\"81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70\": container with ID starting with 81aebdce5ff0d2d71d85e23f0189a8a43dfc16d4436dd33b4e3f7fd7200a8c70 not found: ID does not exist" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.462345 5055 scope.go:117] "RemoveContainer" containerID="d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.462713 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1\": container with ID starting with d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1 not found: ID does not exist" containerID="d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.462736 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1"} err="failed to get container status \"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1\": rpc error: code = NotFound desc = could not find container \"d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1\": container with ID starting with d91e250815d513271c4e8b7629aa20776bfc8120ee5e2c6c2e38372d72f9cfa1 not found: ID does not exist" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.462751 5055 scope.go:117] "RemoveContainer" containerID="99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f" Oct 11 07:16:27 crc kubenswrapper[5055]: E1011 07:16:27.463079 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f\": container with ID starting with 99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f not found: ID does not exist" containerID="99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.463101 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f"} err="failed to get container status \"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f\": rpc error: code = NotFound desc = could not find container \"99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f\": container with ID starting with 99b8e0e77bce7a9609d8c616fa1dd82df1eb7d6c7f7375efde8aa4a02a52f53f not found: ID does not exist" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.529928 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.529995 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530044 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530064 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530156 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530288 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjmfq\" (UniqueName: \"kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530625 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.530835 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633246 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633292 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633330 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633370 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjmfq\" (UniqueName: \"kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 
07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633433 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633571 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633604 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.633973 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.634091 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.638410 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.645975 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.646400 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.647393 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.647731 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") pod 
\"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.653965 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjmfq\" (UniqueName: \"kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.658359 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") " pod="openstack/ceilometer-0" Oct 11 07:16:27 crc kubenswrapper[5055]: I1011 07:16:27.738579 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:16:28 crc kubenswrapper[5055]: I1011 07:16:28.189026 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:16:28 crc kubenswrapper[5055]: I1011 07:16:28.342799 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerStarted","Data":"f7e7eb57eba85e32343dc4fcb1a936ae9cf4e77cae3a0510c11bf7754a5e31db"} Oct 11 07:16:28 crc kubenswrapper[5055]: I1011 07:16:28.629675 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 07:16:28 crc kubenswrapper[5055]: I1011 07:16:28.630106 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 07:16:29 crc kubenswrapper[5055]: I1011 07:16:29.006621 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d3cc469-1b87-4aa6-ae06-a9101f3e1856" path="/var/lib/kubelet/pods/3d3cc469-1b87-4aa6-ae06-a9101f3e1856/volumes" Oct 11 07:16:29 crc kubenswrapper[5055]: I1011 07:16:29.353318 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerStarted","Data":"0399638230b5c7c7f963bef10961418eda20d6564c09f0e21446bdb9ae63c651"} Oct 11 07:16:29 crc kubenswrapper[5055]: I1011 07:16:29.641988 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 11 07:16:29 crc kubenswrapper[5055]: I1011 07:16:29.641988 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 07:16:30 crc kubenswrapper[5055]: I1011 07:16:30.386019 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerStarted","Data":"fc056259743e4f10f36f17507b6f9360ddd90f01cb28e98e88d8f0f76c0424f4"} Oct 11 07:16:30 crc kubenswrapper[5055]: I1011 07:16:30.386749 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerStarted","Data":"16be14a1fb16b39e557f57fdf94625b063467c8d744153870339bfc85e6bfaef"}
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.405110 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerStarted","Data":"952d63dd8e732d73b45770fc6f1374f4aab5e290a8239db7774ed57e6d027c84"}
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.405633 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.429608 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.376043574 podStartE2EDuration="5.429591432s" podCreationTimestamp="2025-10-11 07:16:27 +0000 UTC" firstStartedPulling="2025-10-11 07:16:28.200336387 +0000 UTC m=+1371.974610194" lastFinishedPulling="2025-10-11 07:16:31.253884245 +0000 UTC m=+1375.028158052" observedRunningTime="2025-10-11 07:16:32.421597966 +0000 UTC m=+1376.195871773" watchObservedRunningTime="2025-10-11 07:16:32.429591432 +0000 UTC m=+1376.203865239"
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.640430 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.666603 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Oct 11 07:16:32 crc kubenswrapper[5055]: I1011 07:16:32.961088 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Oct 11 07:16:33 crc kubenswrapper[5055]: I1011 07:16:33.447180 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Oct 11 07:16:34 crc kubenswrapper[5055]: I1011 07:16:34.691822 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 11 07:16:34 crc kubenswrapper[5055]: I1011 07:16:34.691889 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 11 07:16:35 crc kubenswrapper[5055]: I1011 07:16:35.774933 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 11 07:16:35 crc kubenswrapper[5055]: I1011 07:16:35.774965 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 11 07:16:38 crc kubenswrapper[5055]: I1011 07:16:38.635228 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Oct 11 07:16:38 crc kubenswrapper[5055]: I1011 07:16:38.635938 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Oct 11 07:16:38 crc kubenswrapper[5055]: I1011 07:16:38.642847 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Oct 11 07:16:38 crc kubenswrapper[5055]: I1011 07:16:38.644754 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.382240 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.468286 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle\") pod \"e5b81d5d-20b0-421e-a67e-abedea3c766f\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") "
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.468547 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data\") pod \"e5b81d5d-20b0-421e-a67e-abedea3c766f\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") "
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.468585 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxj9z\" (UniqueName: \"kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z\") pod \"e5b81d5d-20b0-421e-a67e-abedea3c766f\" (UID: \"e5b81d5d-20b0-421e-a67e-abedea3c766f\") "
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.473878 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z" (OuterVolumeSpecName: "kube-api-access-wxj9z") pod "e5b81d5d-20b0-421e-a67e-abedea3c766f" (UID: "e5b81d5d-20b0-421e-a67e-abedea3c766f"). InnerVolumeSpecName "kube-api-access-wxj9z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.475648 5055 generic.go:334] "Generic (PLEG): container finished" podID="e5b81d5d-20b0-421e-a67e-abedea3c766f" containerID="d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa" exitCode=137
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.475703 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.475698 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e5b81d5d-20b0-421e-a67e-abedea3c766f","Type":"ContainerDied","Data":"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"}
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.475748 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e5b81d5d-20b0-421e-a67e-abedea3c766f","Type":"ContainerDied","Data":"9ed4838e161d0ebc4b5b72abb922faa095f682871fe46953d4430f2795716cbc"}
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.475786 5055 scope.go:117] "RemoveContainer" containerID="d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.494996 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5b81d5d-20b0-421e-a67e-abedea3c766f" (UID: "e5b81d5d-20b0-421e-a67e-abedea3c766f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.508585 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data" (OuterVolumeSpecName: "config-data") pod "e5b81d5d-20b0-421e-a67e-abedea3c766f" (UID: "e5b81d5d-20b0-421e-a67e-abedea3c766f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.570883 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.570919 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxj9z\" (UniqueName: \"kubernetes.io/projected/e5b81d5d-20b0-421e-a67e-abedea3c766f-kube-api-access-wxj9z\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.570931 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5b81d5d-20b0-421e-a67e-abedea3c766f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.587894 5055 scope.go:117] "RemoveContainer" containerID="d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"
Oct 11 07:16:40 crc kubenswrapper[5055]: E1011 07:16:40.589572 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa\": container with ID starting with d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa not found: ID does not exist" containerID="d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.589626 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa"} err="failed to get container status \"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa\": rpc error: code = NotFound desc = could not find container \"d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa\": container with ID starting with d04caa2dc645f7bbb8682de12d2890e086f10d0e8c16d48ca42d7795e5ba4efa not found: ID does not exist"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.819043 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.827410 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.839621 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:40 crc kubenswrapper[5055]: E1011 07:16:40.840083 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5b81d5d-20b0-421e-a67e-abedea3c766f" containerName="nova-cell1-novncproxy-novncproxy"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.840106 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5b81d5d-20b0-421e-a67e-abedea3c766f" containerName="nova-cell1-novncproxy-novncproxy"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.840352 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5b81d5d-20b0-421e-a67e-abedea3c766f" containerName="nova-cell1-novncproxy-novncproxy"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.841062 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.843689 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.844415 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.844614 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.852046 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.979077 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wlzr\" (UniqueName: \"kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.979147 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.979389 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.979565 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:40 crc kubenswrapper[5055]: I1011 07:16:40.979796 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.010127 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5b81d5d-20b0-421e-a67e-abedea3c766f" path="/var/lib/kubelet/pods/e5b81d5d-20b0-421e-a67e-abedea3c766f/volumes"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.081137 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.081227 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wlzr\" (UniqueName: \"kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.081286 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.081429 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.081490 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.085782 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.087288 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.088100 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.088543 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.099643 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wlzr\" (UniqueName: \"kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr\") pod \"nova-cell1-novncproxy-0\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.178363 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:41 crc kubenswrapper[5055]: I1011 07:16:41.638833 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Oct 11 07:16:41 crc kubenswrapper[5055]: W1011 07:16:41.644112 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9eb6d469_724e_4dc7_943c_60454db39c60.slice/crio-f6972eb41950d9e30be213715d7741d5bc4508bbc234e6f18dd44c5f48eba493 WatchSource:0}: Error finding container f6972eb41950d9e30be213715d7741d5bc4508bbc234e6f18dd44c5f48eba493: Status 404 returned error can't find the container with id f6972eb41950d9e30be213715d7741d5bc4508bbc234e6f18dd44c5f48eba493
Oct 11 07:16:42 crc kubenswrapper[5055]: I1011 07:16:42.500194 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9eb6d469-724e-4dc7-943c-60454db39c60","Type":"ContainerStarted","Data":"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68"}
Oct 11 07:16:42 crc kubenswrapper[5055]: I1011 07:16:42.500878 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9eb6d469-724e-4dc7-943c-60454db39c60","Type":"ContainerStarted","Data":"f6972eb41950d9e30be213715d7741d5bc4508bbc234e6f18dd44c5f48eba493"}
Oct 11 07:16:42 crc kubenswrapper[5055]: I1011 07:16:42.520293 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.520270856 podStartE2EDuration="2.520270856s" podCreationTimestamp="2025-10-11 07:16:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:42.516006595 +0000 UTC m=+1386.290280402" watchObservedRunningTime="2025-10-11 07:16:42.520270856 +0000 UTC m=+1386.294544663"
Oct 11 07:16:44 crc kubenswrapper[5055]: I1011 07:16:44.695990 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Oct 11 07:16:44 crc kubenswrapper[5055]: I1011 07:16:44.696812 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Oct 11 07:16:44 crc kubenswrapper[5055]: I1011 07:16:44.696909 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Oct 11 07:16:44 crc kubenswrapper[5055]: I1011 07:16:44.699123 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.522866 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.526584 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.731118 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"]
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.734426 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.746544 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"]
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866000 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866120 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866150 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866330 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866410 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.866476 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8xzf\" (UniqueName: \"kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.967747 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.967854 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.967886 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.967939 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.967978 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.968029 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8xzf\" (UniqueName: \"kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.968662 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.968996 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.969057 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.969083 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.969087 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:45 crc kubenswrapper[5055]: I1011 07:16:45.993868 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8xzf\" (UniqueName: \"kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf\") pod \"dnsmasq-dns-6d4d96bb9-xs84k\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:46 crc kubenswrapper[5055]: I1011 07:16:46.063867 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:46 crc kubenswrapper[5055]: I1011 07:16:46.178847 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:46 crc kubenswrapper[5055]: I1011 07:16:46.380661 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"]
Oct 11 07:16:46 crc kubenswrapper[5055]: I1011 07:16:46.545910 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" event={"ID":"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac","Type":"ContainerStarted","Data":"fa70a8879a5b484a5d340dad79d572b10ce3dd35a64daf5f25124cf0875d44b6"}
Oct 11 07:16:47 crc kubenswrapper[5055]: I1011 07:16:47.554268 5055 generic.go:334] "Generic (PLEG): container finished" podID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerID="33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca" exitCode=0
Oct 11 07:16:47 crc kubenswrapper[5055]: I1011 07:16:47.555620 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" event={"ID":"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac","Type":"ContainerDied","Data":"33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca"}
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.076105 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.076656 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-central-agent" containerID="cri-o://0399638230b5c7c7f963bef10961418eda20d6564c09f0e21446bdb9ae63c651" gracePeriod=30
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.076730 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="proxy-httpd" containerID="cri-o://952d63dd8e732d73b45770fc6f1374f4aab5e290a8239db7774ed57e6d027c84" gracePeriod=30
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.076810 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-notification-agent" containerID="cri-o://16be14a1fb16b39e557f57fdf94625b063467c8d744153870339bfc85e6bfaef" gracePeriod=30
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.076822 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="sg-core" containerID="cri-o://fc056259743e4f10f36f17507b6f9360ddd90f01cb28e98e88d8f0f76c0424f4" gracePeriod=30
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.089943 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.223455 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.564615 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" event={"ID":"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac","Type":"ContainerStarted","Data":"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8"}
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.564743 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k"
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567452 5055 generic.go:334] "Generic (PLEG): container finished" podID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerID="952d63dd8e732d73b45770fc6f1374f4aab5e290a8239db7774ed57e6d027c84" exitCode=0
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567493 5055 generic.go:334] "Generic (PLEG): container finished" podID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerID="fc056259743e4f10f36f17507b6f9360ddd90f01cb28e98e88d8f0f76c0424f4" exitCode=2
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567501 5055 generic.go:334] "Generic (PLEG): container finished" podID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerID="0399638230b5c7c7f963bef10961418eda20d6564c09f0e21446bdb9ae63c651" exitCode=0
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567533 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerDied","Data":"952d63dd8e732d73b45770fc6f1374f4aab5e290a8239db7774ed57e6d027c84"}
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567571 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerDied","Data":"fc056259743e4f10f36f17507b6f9360ddd90f01cb28e98e88d8f0f76c0424f4"}
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567583 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerDied","Data":"0399638230b5c7c7f963bef10961418eda20d6564c09f0e21446bdb9ae63c651"}
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567729 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-log" containerID="cri-o://3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3" gracePeriod=30
Oct 11 07:16:48 crc kubenswrapper[5055]: I1011 07:16:48.567758 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-api" containerID="cri-o://2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0" gracePeriod=30
Oct 11 07:16:49 crc kubenswrapper[5055]: I1011 07:16:49.577332 5055 generic.go:334] "Generic (PLEG): container finished" podID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerID="3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3" exitCode=143
Oct 11 07:16:49 crc kubenswrapper[5055]: I1011 07:16:49.578692 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerDied","Data":"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3"}
Oct 11 07:16:50 crc kubenswrapper[5055]: I1011 07:16:50.598712 5055 generic.go:334] "Generic (PLEG): container finished" podID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerID="16be14a1fb16b39e557f57fdf94625b063467c8d744153870339bfc85e6bfaef" exitCode=0
Oct 11 07:16:50 crc kubenswrapper[5055]: I1011 07:16:50.598796 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerDied","Data":"16be14a1fb16b39e557f57fdf94625b063467c8d744153870339bfc85e6bfaef"}
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.873373 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.901720 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" podStartSLOduration=5.901701246 podStartE2EDuration="5.901701246s" podCreationTimestamp="2025-10-11 07:16:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:48.587374341 +0000 UTC m=+1392.361648148" watchObservedRunningTime="2025-10-11 07:16:50.901701246 +0000 UTC m=+1394.675975053"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978039 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978122 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978193 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978227 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978264 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978280 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjmfq\" (UniqueName: \"kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978323 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978360 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978550 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.978974 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.979004 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.987517 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts" (OuterVolumeSpecName: "scripts") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:50.992461 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq" (OuterVolumeSpecName: "kube-api-access-rjmfq") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "kube-api-access-rjmfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.018187 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.030314 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.053557 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.079642 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data" (OuterVolumeSpecName: "config-data") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.081604 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") pod \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\" (UID: \"1e2eba38-a0db-4a96-80a5-28fbb5c96106\") "
Oct 11 07:16:51 crc kubenswrapper[5055]: W1011 07:16:51.081855 5055 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/1e2eba38-a0db-4a96-80a5-28fbb5c96106/volumes/kubernetes.io~secret/config-data
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.081880 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data" (OuterVolumeSpecName: "config-data") pod "1e2eba38-a0db-4a96-80a5-28fbb5c96106" (UID: "1e2eba38-a0db-4a96-80a5-28fbb5c96106"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082307 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082323 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082335 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjmfq\" (UniqueName: \"kubernetes.io/projected/1e2eba38-a0db-4a96-80a5-28fbb5c96106-kube-api-access-rjmfq\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082344 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e2eba38-a0db-4a96-80a5-28fbb5c96106-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082353 5055 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082362 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.082370 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e2eba38-a0db-4a96-80a5-28fbb5c96106-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.178707 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.197403 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.609446 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e2eba38-a0db-4a96-80a5-28fbb5c96106","Type":"ContainerDied","Data":"f7e7eb57eba85e32343dc4fcb1a936ae9cf4e77cae3a0510c11bf7754a5e31db"}
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.609497 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.609797 5055 scope.go:117] "RemoveContainer" containerID="952d63dd8e732d73b45770fc6f1374f4aab5e290a8239db7774ed57e6d027c84"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.624527 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.635621 5055 scope.go:117] "RemoveContainer" containerID="fc056259743e4f10f36f17507b6f9360ddd90f01cb28e98e88d8f0f76c0424f4"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.646029 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.657610 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.665441 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:51 crc kubenswrapper[5055]: E1011 07:16:51.665836 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="sg-core"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.665853 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="sg-core"
Oct 11 07:16:51 crc kubenswrapper[5055]: E1011 07:16:51.665869 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-central-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.665877 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-central-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: E1011 07:16:51.665897 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="proxy-httpd"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.665903 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="proxy-httpd"
Oct 11 07:16:51 crc kubenswrapper[5055]: E1011 07:16:51.665930 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-notification-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.665936 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-notification-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.666118 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="sg-core"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.666143 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-central-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.666153 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="proxy-httpd"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.666173 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" containerName="ceilometer-notification-agent"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.668041 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.668393 5055 scope.go:117] "RemoveContainer" containerID="16be14a1fb16b39e557f57fdf94625b063467c8d744153870339bfc85e6bfaef"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.675819 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.675877 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.675901 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.702656 5055 scope.go:117] "RemoveContainer" containerID="0399638230b5c7c7f963bef10961418eda20d6564c09f0e21446bdb9ae63c651"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.709955 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.793827 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.793879 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.793906 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.793925 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjdwx\" (UniqueName: \"kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.793944 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.794043 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.794095 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.794135 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.837276 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-6x24c"]
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.838699 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.840903 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.841217 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.845307 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6x24c"]
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.895910 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.895954 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.895981 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896008 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896027 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjdwx\" (UniqueName: \"kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896045 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896097 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896130 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vg8rj\" (UniqueName: \"kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896161 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896220 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896252 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.896296 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.898728 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.901154 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.901525 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.901792 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.902313 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.904626 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.905490 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.920162 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjdwx\" (UniqueName: \"kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx\") pod \"ceilometer-0\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " pod="openstack/ceilometer-0"
Oct 11 07:16:51 crc kubenswrapper[5055]: I1011 07:16:51.991892 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:51.998369 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:51.998491 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:51.998536 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vg8rj\" (UniqueName: \"kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:51.998600 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.004023 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.004218 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.004745 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.021466 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vg8rj\" (UniqueName: \"kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj\") pod \"nova-cell1-cell-mapping-6x24c\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.088694 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6x24c"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.184637 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.311970 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p6mz\" (UniqueName: \"kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz\") pod \"2825fd40-7391-4d98-be0b-7ee89947cbe8\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") "
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.312527 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data\") pod \"2825fd40-7391-4d98-be0b-7ee89947cbe8\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") "
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.312608 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle\") pod \"2825fd40-7391-4d98-be0b-7ee89947cbe8\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") "
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.312693 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs\") pod \"2825fd40-7391-4d98-be0b-7ee89947cbe8\" (UID: \"2825fd40-7391-4d98-be0b-7ee89947cbe8\") "
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.313200 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs" (OuterVolumeSpecName: "logs") pod "2825fd40-7391-4d98-be0b-7ee89947cbe8" (UID: "2825fd40-7391-4d98-be0b-7ee89947cbe8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.319888 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz" (OuterVolumeSpecName: "kube-api-access-8p6mz") pod "2825fd40-7391-4d98-be0b-7ee89947cbe8" (UID: "2825fd40-7391-4d98-be0b-7ee89947cbe8"). InnerVolumeSpecName "kube-api-access-8p6mz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.363299 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2825fd40-7391-4d98-be0b-7ee89947cbe8" (UID: "2825fd40-7391-4d98-be0b-7ee89947cbe8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.364924 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data" (OuterVolumeSpecName: "config-data") pod "2825fd40-7391-4d98-be0b-7ee89947cbe8" (UID: "2825fd40-7391-4d98-be0b-7ee89947cbe8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.415044 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.415083 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2825fd40-7391-4d98-be0b-7ee89947cbe8-logs\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.415093 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p6mz\" (UniqueName: \"kubernetes.io/projected/2825fd40-7391-4d98-be0b-7ee89947cbe8-kube-api-access-8p6mz\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.415104 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2825fd40-7391-4d98-be0b-7ee89947cbe8-config-data\") on node \"crc\" DevicePath \"\""
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.560827 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 11 07:16:52 crc kubenswrapper[5055]: W1011 07:16:52.570293 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3cc22641_a524_415a_8377_580664ed5d90.slice/crio-e93036b1c1f05ad35709c2b098240496f0383f66af42c47a4063ed8587f8d4ed WatchSource:0}: Error finding container e93036b1c1f05ad35709c2b098240496f0383f66af42c47a4063ed8587f8d4ed: Status 404 returned error can't find the container with id e93036b1c1f05ad35709c2b098240496f0383f66af42c47a4063ed8587f8d4ed
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.618237 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerStarted","Data":"e93036b1c1f05ad35709c2b098240496f0383f66af42c47a4063ed8587f8d4ed"}
Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.620074 5055 generic.go:334] "Generic (PLEG): container
finished" podID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerID="2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0" exitCode=0 Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.620157 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerDied","Data":"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0"} Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.620189 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2825fd40-7391-4d98-be0b-7ee89947cbe8","Type":"ContainerDied","Data":"ca60e8941a601cf4a3ea5964db06ee10798629f4c3561b5b6d65470625dbac1e"} Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.620209 5055 scope.go:117] "RemoveContainer" containerID="2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.620308 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.672203 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.692000 5055 scope.go:117] "RemoveContainer" containerID="3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.695114 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6x24c"] Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.708921 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.723835 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:52 crc kubenswrapper[5055]: E1011 07:16:52.724632 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-api" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.724649 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-api" Oct 11 07:16:52 crc kubenswrapper[5055]: E1011 07:16:52.724668 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-log" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.724675 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-log" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.724871 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-api" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.724896 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" containerName="nova-api-log" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.725879 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.737198 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.737359 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.737514 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.740816 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821732 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821898 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg4kq\" (UniqueName: \"kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821921 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821961 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.821999 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.848482 5055 scope.go:117] "RemoveContainer" containerID="2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0" Oct 11 07:16:52 crc kubenswrapper[5055]: E1011 07:16:52.852620 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0\": container with ID starting with 2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0 not found: ID does not exist" 
containerID="2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.852647 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0"} err="failed to get container status \"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0\": rpc error: code = NotFound desc = could not find container \"2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0\": container with ID starting with 2101413e734525dd5d889eb53077f8369b9f19c421f4caa63081eb34f4419ea0 not found: ID does not exist" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.852667 5055 scope.go:117] "RemoveContainer" containerID="3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3" Oct 11 07:16:52 crc kubenswrapper[5055]: E1011 07:16:52.853440 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3\": container with ID starting with 3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3 not found: ID does not exist" containerID="3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.853460 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3"} err="failed to get container status \"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3\": rpc error: code = NotFound desc = could not find container \"3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3\": container with ID starting with 3983c5a970b9d52b997e3014747def678ed61890a115634c34f516bacbe061c3 not found: ID does not exist" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924146 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924210 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924238 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924295 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924385 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg4kq\" (UniqueName: 
\"kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.924401 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.925108 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.928134 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.928332 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.928402 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.928979 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:52 crc kubenswrapper[5055]: I1011 07:16:52.941833 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg4kq\" (UniqueName: \"kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq\") pod \"nova-api-0\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " pod="openstack/nova-api-0" Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.010899 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e2eba38-a0db-4a96-80a5-28fbb5c96106" path="/var/lib/kubelet/pods/1e2eba38-a0db-4a96-80a5-28fbb5c96106/volumes" Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.011625 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2825fd40-7391-4d98-be0b-7ee89947cbe8" path="/var/lib/kubelet/pods/2825fd40-7391-4d98-be0b-7ee89947cbe8/volumes" Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.157000 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.649142 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6x24c" event={"ID":"e7982ac6-3330-40ea-a74b-1f512b13b004","Type":"ContainerStarted","Data":"8d98edc9568b84b014fa7d41ec6840140f135da6ee307da88b52c367995dc9a7"} Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.650031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6x24c" event={"ID":"e7982ac6-3330-40ea-a74b-1f512b13b004","Type":"ContainerStarted","Data":"ce4c245a6093bf44df3407c4e960f7995632627d4654533e85ab607f5e6f0456"} Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.661149 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:16:53 crc kubenswrapper[5055]: I1011 07:16:53.664865 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerStarted","Data":"e7de0a43fb54dfe049156667e0b998705b9e78b7bff9600f863738f69bbff3b9"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.674138 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerStarted","Data":"919ef65eeeea598c7b3f2a94cda4ed6f77049d1b185e1b4613c3eaa5ae3f9970"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.674426 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerStarted","Data":"bbcfbd58af515683c0fa41f7883c1636fcf3f11322b07edf17837f0776642867"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.677842 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerStarted","Data":"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.677878 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerStarted","Data":"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.677889 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerStarted","Data":"8987467d01c59a465c47c5feaa1787eb281f0eb7bc2bf2ce52bba57f83b6ae12"} Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.699992 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.6999687039999998 podStartE2EDuration="2.699968704s" podCreationTimestamp="2025-10-11 07:16:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:54.697440432 +0000 UTC m=+1398.471714239" watchObservedRunningTime="2025-10-11 07:16:54.699968704 +0000 UTC m=+1398.474242501" Oct 11 07:16:54 crc kubenswrapper[5055]: I1011 07:16:54.708311 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-6x24c" podStartSLOduration=3.708288699 podStartE2EDuration="3.708288699s" podCreationTimestamp="2025-10-11 07:16:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:16:53.67606589 +0000 UTC m=+1397.450339697" watchObservedRunningTime="2025-10-11 07:16:54.708288699 +0000 UTC m=+1398.482562506" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.065994 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.140694 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"] Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.141018 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="dnsmasq-dns" containerID="cri-o://e0f8f4f5ee8e27ab9d848e71a6fa0ca6a9e1475c91201c56ce1efa6697fb6e46" gracePeriod=10 Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.705195 5055 generic.go:334] "Generic (PLEG): container finished" podID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerID="e0f8f4f5ee8e27ab9d848e71a6fa0ca6a9e1475c91201c56ce1efa6697fb6e46" exitCode=0 Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.705576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" event={"ID":"44d34b4e-3d9f-4d8c-9e0b-818f54207174","Type":"ContainerDied","Data":"e0f8f4f5ee8e27ab9d848e71a6fa0ca6a9e1475c91201c56ce1efa6697fb6e46"} Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.709368 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerStarted","Data":"c87bdbf1b6954a66227351f91bbd585daf3ee9facdce59279624e5a7379654be"} Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.709499 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.732074 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.515031986 podStartE2EDuration="5.732054167s" podCreationTimestamp="2025-10-11 07:16:51 +0000 UTC" firstStartedPulling="2025-10-11 07:16:52.573363617 +0000 UTC m=+1396.347637424" lastFinishedPulling="2025-10-11 07:16:55.790385798 +0000 UTC m=+1399.564659605" observedRunningTime="2025-10-11 07:16:56.731150121 +0000 UTC m=+1400.505423928" watchObservedRunningTime="2025-10-11 07:16:56.732054167 +0000 UTC m=+1400.506327984" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.849714 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.910638 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgk6k\" (UniqueName: \"kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.910701 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.910920 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.910983 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.911014 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.911055 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb\") pod \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\" (UID: \"44d34b4e-3d9f-4d8c-9e0b-818f54207174\") " Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.926003 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k" (OuterVolumeSpecName: "kube-api-access-lgk6k") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "kube-api-access-lgk6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.978461 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config" (OuterVolumeSpecName: "config") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.979187 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.989731 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:56 crc kubenswrapper[5055]: I1011 07:16:56.996517 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.004377 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "44d34b4e-3d9f-4d8c-9e0b-818f54207174" (UID: "44d34b4e-3d9f-4d8c-9e0b-818f54207174"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014357 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014400 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgk6k\" (UniqueName: \"kubernetes.io/projected/44d34b4e-3d9f-4d8c-9e0b-818f54207174-kube-api-access-lgk6k\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014417 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014429 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014440 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.014453 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/44d34b4e-3d9f-4d8c-9e0b-818f54207174-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.737396 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" event={"ID":"44d34b4e-3d9f-4d8c-9e0b-818f54207174","Type":"ContainerDied","Data":"06aa87f2feec55dc8d8526178c948490e64b8d67700318048c257792057e7fdb"} Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.737709 5055 scope.go:117] "RemoveContainer" containerID="e0f8f4f5ee8e27ab9d848e71a6fa0ca6a9e1475c91201c56ce1efa6697fb6e46" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.739692 5055 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6ffc974fdf-mlh8n" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.791609 5055 scope.go:117] "RemoveContainer" containerID="6aaa9ba325ed091ed92e606e40a5233df374e585e7a750933844ee44f6e77983" Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.791985 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"] Oct 11 07:16:57 crc kubenswrapper[5055]: I1011 07:16:57.805915 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6ffc974fdf-mlh8n"] Oct 11 07:16:58 crc kubenswrapper[5055]: I1011 07:16:58.746991 5055 generic.go:334] "Generic (PLEG): container finished" podID="e7982ac6-3330-40ea-a74b-1f512b13b004" containerID="8d98edc9568b84b014fa7d41ec6840140f135da6ee307da88b52c367995dc9a7" exitCode=0 Oct 11 07:16:58 crc kubenswrapper[5055]: I1011 07:16:58.747084 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6x24c" event={"ID":"e7982ac6-3330-40ea-a74b-1f512b13b004","Type":"ContainerDied","Data":"8d98edc9568b84b014fa7d41ec6840140f135da6ee307da88b52c367995dc9a7"} Oct 11 07:16:59 crc kubenswrapper[5055]: I1011 07:16:59.005038 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" path="/var/lib/kubelet/pods/44d34b4e-3d9f-4d8c-9e0b-818f54207174/volumes" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.129980 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6x24c" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.271519 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vg8rj\" (UniqueName: \"kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj\") pod \"e7982ac6-3330-40ea-a74b-1f512b13b004\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.271790 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle\") pod \"e7982ac6-3330-40ea-a74b-1f512b13b004\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.271916 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data\") pod \"e7982ac6-3330-40ea-a74b-1f512b13b004\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.272063 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts\") pod \"e7982ac6-3330-40ea-a74b-1f512b13b004\" (UID: \"e7982ac6-3330-40ea-a74b-1f512b13b004\") " Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.277827 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj" (OuterVolumeSpecName: "kube-api-access-vg8rj") pod "e7982ac6-3330-40ea-a74b-1f512b13b004" (UID: "e7982ac6-3330-40ea-a74b-1f512b13b004"). InnerVolumeSpecName "kube-api-access-vg8rj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.278024 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts" (OuterVolumeSpecName: "scripts") pod "e7982ac6-3330-40ea-a74b-1f512b13b004" (UID: "e7982ac6-3330-40ea-a74b-1f512b13b004"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.300922 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7982ac6-3330-40ea-a74b-1f512b13b004" (UID: "e7982ac6-3330-40ea-a74b-1f512b13b004"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.307117 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data" (OuterVolumeSpecName: "config-data") pod "e7982ac6-3330-40ea-a74b-1f512b13b004" (UID: "e7982ac6-3330-40ea-a74b-1f512b13b004"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.373674 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vg8rj\" (UniqueName: \"kubernetes.io/projected/e7982ac6-3330-40ea-a74b-1f512b13b004-kube-api-access-vg8rj\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.373705 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.373714 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.373723 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7982ac6-3330-40ea-a74b-1f512b13b004-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.777325 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6x24c" event={"ID":"e7982ac6-3330-40ea-a74b-1f512b13b004","Type":"ContainerDied","Data":"ce4c245a6093bf44df3407c4e960f7995632627d4654533e85ab607f5e6f0456"} Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.777612 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce4c245a6093bf44df3407c4e960f7995632627d4654533e85ab607f5e6f0456" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.777529 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6x24c" Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.950467 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.950732 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-log" containerID="cri-o://be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf" gracePeriod=30 Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.950826 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-api" containerID="cri-o://e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" gracePeriod=30 Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.960790 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:00 crc kubenswrapper[5055]: I1011 07:17:00.961215 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" containerName="nova-scheduler-scheduler" containerID="cri-o://ec2facf50e444faa12052207cfdadf93f534e702db147e2422b0ca62c80cd3f5" gracePeriod=30 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.024234 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.024461 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" containerID="cri-o://ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7" gracePeriod=30 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.024903 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" containerID="cri-o://d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614" gracePeriod=30 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.684435 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.698694 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg4kq\" (UniqueName: \"kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.698809 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.698875 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.698927 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.698957 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.699000 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle\") pod \"99f43cd6-a239-467a-87ce-95cb7f8025c1\" (UID: \"99f43cd6-a239-467a-87ce-95cb7f8025c1\") " Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.702947 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs" (OuterVolumeSpecName: "logs") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.712800 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq" (OuterVolumeSpecName: "kube-api-access-wg4kq") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "kube-api-access-wg4kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.744016 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.756529 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data" (OuterVolumeSpecName: "config-data") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.782044 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.792820 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "99f43cd6-a239-467a-87ce-95cb7f8025c1" (UID: "99f43cd6-a239-467a-87ce-95cb7f8025c1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.795740 5055 generic.go:334] "Generic (PLEG): container finished" podID="749782e2-8a84-42d3-ab86-edcf45344967" containerID="ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7" exitCode=143 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.795857 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerDied","Data":"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7"} Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798580 5055 generic.go:334] "Generic (PLEG): container finished" podID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerID="e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" exitCode=0 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798610 5055 generic.go:334] "Generic (PLEG): container finished" podID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerID="be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf" exitCode=143 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798649 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798666 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerDied","Data":"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b"} Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798692 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerDied","Data":"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf"} Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798702 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"99f43cd6-a239-467a-87ce-95cb7f8025c1","Type":"ContainerDied","Data":"8987467d01c59a465c47c5feaa1787eb281f0eb7bc2bf2ce52bba57f83b6ae12"} Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.798718 5055 scope.go:117] "RemoveContainer" containerID="e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808399 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808433 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg4kq\" (UniqueName: \"kubernetes.io/projected/99f43cd6-a239-467a-87ce-95cb7f8025c1-kube-api-access-wg4kq\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808443 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99f43cd6-a239-467a-87ce-95cb7f8025c1-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808452 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808466 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.808474 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/99f43cd6-a239-467a-87ce-95cb7f8025c1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.827300 5055 generic.go:334] "Generic (PLEG): container finished" podID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" containerID="ec2facf50e444faa12052207cfdadf93f534e702db147e2422b0ca62c80cd3f5" exitCode=0 Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.827351 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4add32bf-085d-4ed0-9e73-688f3c8bd8b9","Type":"ContainerDied","Data":"ec2facf50e444faa12052207cfdadf93f534e702db147e2422b0ca62c80cd3f5"} Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.874892 5055 scope.go:117] "RemoveContainer" containerID="be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.891267 5055 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.898379 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.899745 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.904856 5055 scope.go:117] "RemoveContainer" containerID="e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.908291 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b\": container with ID starting with e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b not found: ID does not exist" containerID="e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.908333 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b"} err="failed to get container status \"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b\": rpc error: code = NotFound desc = could not find container \"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b\": container with ID starting with e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b not found: ID does not exist" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.908362 5055 scope.go:117] "RemoveContainer" containerID="be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf" Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.916553 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf\": container with ID starting with be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf not found: ID does not exist" containerID="be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.916607 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf"} err="failed to get container status \"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf\": rpc error: code = NotFound desc = could not find container \"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf\": container with ID starting with be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf not found: ID does not exist" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.916639 5055 scope.go:117] "RemoveContainer" containerID="e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.916750 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917256 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="dnsmasq-dns" Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917279 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="dnsmasq-dns" 
Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917299 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-api"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917307 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-api"
Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917320 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7982ac6-3330-40ea-a74b-1f512b13b004" containerName="nova-manage"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917328 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7982ac6-3330-40ea-a74b-1f512b13b004" containerName="nova-manage"
Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917354 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="init"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917361 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="init"
Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917394 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" containerName="nova-scheduler-scheduler"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917405 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" containerName="nova-scheduler-scheduler"
Oct 11 07:17:01 crc kubenswrapper[5055]: E1011 07:17:01.917418 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-log"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917426 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-log"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917452 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b"} err="failed to get container status \"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b\": rpc error: code = NotFound desc = could not find container \"e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b\": container with ID starting with e7a18c7ec3c9a87b8574a9c3a933a2d90afb7e000a565a0b5b4c1ed97d52f32b not found: ID does not exist"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917494 5055 scope.go:117] "RemoveContainer" containerID="be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917670 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" containerName="nova-scheduler-scheduler"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917692 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-log"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917708 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="44d34b4e-3d9f-4d8c-9e0b-818f54207174" containerName="dnsmasq-dns"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917718 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" containerName="nova-api-api"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917733 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7982ac6-3330-40ea-a74b-1f512b13b004" containerName="nova-manage"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.917760 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf"} err="failed to get container status \"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf\": rpc error: code = NotFound desc = could not find container \"be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf\": container with ID starting with be2668b6e55d85b0e273c8bafa2bfe4b7a165197fbe9542aaf2f6d6abf75dbaf not found: ID does not exist"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.919044 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.929550 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.930088 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.930253 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Oct 11 07:17:01 crc kubenswrapper[5055]: I1011 07:17:01.930367 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.012427 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle\") pod \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") "
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.012822 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfm6l\" (UniqueName: \"kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l\") pod \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") "
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.012991 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data\") pod \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\" (UID: \"4add32bf-085d-4ed0-9e73-688f3c8bd8b9\") "
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.013442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsvdl\" (UniqueName: \"kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.013595 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.013705 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.013815 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.013973 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.014279 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0"
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.016444 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l" (OuterVolumeSpecName: "kube-api-access-wfm6l") pod "4add32bf-085d-4ed0-9e73-688f3c8bd8b9" (UID: "4add32bf-085d-4ed0-9e73-688f3c8bd8b9"). InnerVolumeSpecName "kube-api-access-wfm6l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.040565 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4add32bf-085d-4ed0-9e73-688f3c8bd8b9" (UID: "4add32bf-085d-4ed0-9e73-688f3c8bd8b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.042588 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data" (OuterVolumeSpecName: "config-data") pod "4add32bf-085d-4ed0-9e73-688f3c8bd8b9" (UID: "4add32bf-085d-4ed0-9e73-688f3c8bd8b9"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116607 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116676 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsvdl\" (UniqueName: \"kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116701 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116732 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116755 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116798 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116839 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116849 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.116859 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfm6l\" (UniqueName: \"kubernetes.io/projected/4add32bf-085d-4ed0-9e73-688f3c8bd8b9-kube-api-access-wfm6l\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.117353 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.119951 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.120209 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.120349 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.120767 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.135174 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsvdl\" (UniqueName: \"kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl\") pod \"nova-api-0\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.239142 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.803896 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.842453 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4add32bf-085d-4ed0-9e73-688f3c8bd8b9","Type":"ContainerDied","Data":"40e4298b124acf760e3f32009d2bf28b0e61d5f30c6fbd0c2c8d5e4c1c5ac7a8"} Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.842522 5055 scope.go:117] "RemoveContainer" containerID="ec2facf50e444faa12052207cfdadf93f534e702db147e2422b0ca62c80cd3f5" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.842594 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.847943 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerStarted","Data":"6112632e87c1745d89d18d736d80fbc7beda702b35d0db0b2ea2edef427f7a56"} Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.884971 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.895364 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.904216 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.905561 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.907602 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.918162 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.934713 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.934798 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw6mx\" (UniqueName: \"kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:02 crc kubenswrapper[5055]: I1011 07:17:02.934839 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.005267 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4add32bf-085d-4ed0-9e73-688f3c8bd8b9" path="/var/lib/kubelet/pods/4add32bf-085d-4ed0-9e73-688f3c8bd8b9/volumes" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.005801 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99f43cd6-a239-467a-87ce-95cb7f8025c1" path="/var/lib/kubelet/pods/99f43cd6-a239-467a-87ce-95cb7f8025c1/volumes" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.036889 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.036954 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw6mx\" (UniqueName: \"kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.036997 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.040099 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: 
I1011 07:17:03.040392 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.052593 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw6mx\" (UniqueName: \"kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx\") pod \"nova-scheduler-0\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.244112 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.701271 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:03 crc kubenswrapper[5055]: W1011 07:17:03.709407 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2e0c881_0205_4317_ba03_cfb76f0f69e6.slice/crio-1d574623ada8580564af4480ec8905f977d1293c996b9e7d728ea9ba7d1939f9 WatchSource:0}: Error finding container 1d574623ada8580564af4480ec8905f977d1293c996b9e7d728ea9ba7d1939f9: Status 404 returned error can't find the container with id 1d574623ada8580564af4480ec8905f977d1293c996b9e7d728ea9ba7d1939f9 Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.861025 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerStarted","Data":"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab"} Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.861068 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerStarted","Data":"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce"} Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.883563 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e2e0c881-0205-4317-ba03-cfb76f0f69e6","Type":"ContainerStarted","Data":"1d574623ada8580564af4480ec8905f977d1293c996b9e7d728ea9ba7d1939f9"} Oct 11 07:17:03 crc kubenswrapper[5055]: I1011 07:17:03.891792 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8917756089999997 podStartE2EDuration="2.891775609s" podCreationTimestamp="2025-10-11 07:17:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:17:03.888137116 +0000 UTC m=+1407.662410923" watchObservedRunningTime="2025-10-11 07:17:03.891775609 +0000 UTC m=+1407.666049416" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.157456 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": read tcp 10.217.0.2:45850->10.217.0.189:8775: read: connection reset by peer" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.157687 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" 
podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.189:8775/\": read tcp 10.217.0.2:45852->10.217.0.189:8775: read: connection reset by peer" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.621150 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.794847 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs\") pod \"749782e2-8a84-42d3-ab86-edcf45344967\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.794945 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle\") pod \"749782e2-8a84-42d3-ab86-edcf45344967\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.795006 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs\") pod \"749782e2-8a84-42d3-ab86-edcf45344967\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.795276 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs" (OuterVolumeSpecName: "logs") pod "749782e2-8a84-42d3-ab86-edcf45344967" (UID: "749782e2-8a84-42d3-ab86-edcf45344967"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.795688 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data\") pod \"749782e2-8a84-42d3-ab86-edcf45344967\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.795780 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z97tb\" (UniqueName: \"kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb\") pod \"749782e2-8a84-42d3-ab86-edcf45344967\" (UID: \"749782e2-8a84-42d3-ab86-edcf45344967\") " Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.821201 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb" (OuterVolumeSpecName: "kube-api-access-z97tb") pod "749782e2-8a84-42d3-ab86-edcf45344967" (UID: "749782e2-8a84-42d3-ab86-edcf45344967"). InnerVolumeSpecName "kube-api-access-z97tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.823503 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data" (OuterVolumeSpecName: "config-data") pod "749782e2-8a84-42d3-ab86-edcf45344967" (UID: "749782e2-8a84-42d3-ab86-edcf45344967"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.834033 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "749782e2-8a84-42d3-ab86-edcf45344967" (UID: "749782e2-8a84-42d3-ab86-edcf45344967"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.864111 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "749782e2-8a84-42d3-ab86-edcf45344967" (UID: "749782e2-8a84-42d3-ab86-edcf45344967"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.897145 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.897184 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z97tb\" (UniqueName: \"kubernetes.io/projected/749782e2-8a84-42d3-ab86-edcf45344967-kube-api-access-z97tb\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.897196 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/749782e2-8a84-42d3-ab86-edcf45344967-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.897210 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.897221 5055 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/749782e2-8a84-42d3-ab86-edcf45344967-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.904565 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e2e0c881-0205-4317-ba03-cfb76f0f69e6","Type":"ContainerStarted","Data":"cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8"} Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.906307 5055 generic.go:334] "Generic (PLEG): container finished" podID="749782e2-8a84-42d3-ab86-edcf45344967" containerID="d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614" exitCode=0 Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.906367 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.906422 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerDied","Data":"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614"} Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.906485 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"749782e2-8a84-42d3-ab86-edcf45344967","Type":"ContainerDied","Data":"770597d409b69d117ab5e6464d58d9b706549b315cabe46e4149141cbff3985c"} Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.906522 5055 scope.go:117] "RemoveContainer" containerID="d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.931259 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.931237023 podStartE2EDuration="2.931237023s" podCreationTimestamp="2025-10-11 07:17:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:17:04.925217413 +0000 UTC m=+1408.699491240" watchObservedRunningTime="2025-10-11 07:17:04.931237023 +0000 UTC m=+1408.705510840" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.937877 5055 scope.go:117] "RemoveContainer" containerID="ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.963652 5055 scope.go:117] "RemoveContainer" containerID="d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614" Oct 11 07:17:04 crc kubenswrapper[5055]: E1011 07:17:04.964045 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614\": container with ID starting with d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614 not found: ID does not exist" containerID="d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.964077 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614"} err="failed to get container status \"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614\": rpc error: code = NotFound desc = could not find container \"d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614\": container with ID starting with d6309c7f73d90e8d6472587a67a00451c61588383fdc67aed9725fbba209c614 not found: ID does not exist" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.964104 5055 scope.go:117] "RemoveContainer" containerID="ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7" Oct 11 07:17:04 crc kubenswrapper[5055]: E1011 07:17:04.964550 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7\": container with ID starting with ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7 not found: ID does not exist" containerID="ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.964572 5055 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7"} err="failed to get container status \"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7\": rpc error: code = NotFound desc = could not find container \"ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7\": container with ID starting with ca8c46bcaf0af78b1367ef031bfcedfbfe77697aa0f987cb1e615ae9187fb7c7 not found: ID does not exist" Oct 11 07:17:04 crc kubenswrapper[5055]: I1011 07:17:04.988265 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.039730 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.039835 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:05 crc kubenswrapper[5055]: E1011 07:17:05.040247 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.040268 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" Oct 11 07:17:05 crc kubenswrapper[5055]: E1011 07:17:05.040279 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.040285 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.040935 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-log" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.040961 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="749782e2-8a84-42d3-ab86-edcf45344967" containerName="nova-metadata-metadata" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.044959 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.045536 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.047535 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.048599 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.219258 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lthfp\" (UniqueName: \"kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.219339 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.219421 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.219445 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.219465 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.320914 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.320995 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.321091 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lthfp\" (UniqueName: \"kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " 
pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.321159 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.321258 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.321658 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.324654 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.325139 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.325302 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.338269 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lthfp\" (UniqueName: \"kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp\") pod \"nova-metadata-0\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.363020 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:05 crc kubenswrapper[5055]: W1011 07:17:05.858100 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod056ae819_c243_4aa7_8214_1432f198dcab.slice/crio-6a920aec6d20b4cebe6fa23903df2f90d9534ebdb36d531aa902730ca0b0408e WatchSource:0}: Error finding container 6a920aec6d20b4cebe6fa23903df2f90d9534ebdb36d531aa902730ca0b0408e: Status 404 returned error can't find the container with id 6a920aec6d20b4cebe6fa23903df2f90d9534ebdb36d531aa902730ca0b0408e Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.861858 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:05 crc kubenswrapper[5055]: I1011 07:17:05.921117 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerStarted","Data":"6a920aec6d20b4cebe6fa23903df2f90d9534ebdb36d531aa902730ca0b0408e"} Oct 11 07:17:06 crc kubenswrapper[5055]: I1011 07:17:06.931160 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerStarted","Data":"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d"} Oct 11 07:17:06 crc kubenswrapper[5055]: I1011 07:17:06.931402 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerStarted","Data":"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9"} Oct 11 07:17:06 crc kubenswrapper[5055]: I1011 07:17:06.951485 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.9514664699999997 podStartE2EDuration="2.95146647s" podCreationTimestamp="2025-10-11 07:17:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:17:06.947439657 +0000 UTC m=+1410.721713474" watchObservedRunningTime="2025-10-11 07:17:06.95146647 +0000 UTC m=+1410.725740277" Oct 11 07:17:07 crc kubenswrapper[5055]: I1011 07:17:07.004545 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="749782e2-8a84-42d3-ab86-edcf45344967" path="/var/lib/kubelet/pods/749782e2-8a84-42d3-ab86-edcf45344967/volumes" Oct 11 07:17:08 crc kubenswrapper[5055]: I1011 07:17:08.244577 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 11 07:17:10 crc kubenswrapper[5055]: I1011 07:17:10.364102 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 07:17:10 crc kubenswrapper[5055]: I1011 07:17:10.364449 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 11 07:17:12 crc kubenswrapper[5055]: I1011 07:17:12.240221 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 07:17:12 crc kubenswrapper[5055]: I1011 07:17:12.240694 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 11 07:17:13 crc kubenswrapper[5055]: I1011 07:17:13.244703 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 11 07:17:13 crc kubenswrapper[5055]: I1011 07:17:13.250974 5055 prober.go:107] "Probe 
failed" probeType="Startup" pod="openstack/nova-api-0" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 07:17:13 crc kubenswrapper[5055]: I1011 07:17:13.250976 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 07:17:13 crc kubenswrapper[5055]: I1011 07:17:13.273590 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 11 07:17:14 crc kubenswrapper[5055]: I1011 07:17:14.068484 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 11 07:17:15 crc kubenswrapper[5055]: I1011 07:17:15.364534 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 07:17:15 crc kubenswrapper[5055]: I1011 07:17:15.364740 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 11 07:17:16 crc kubenswrapper[5055]: I1011 07:17:16.376914 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 07:17:16 crc kubenswrapper[5055]: I1011 07:17:16.376937 5055 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.001048 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.249012 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.249109 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.250123 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.250187 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.268858 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 07:17:22 crc kubenswrapper[5055]: I1011 07:17:22.276218 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 11 07:17:25 crc kubenswrapper[5055]: I1011 07:17:25.374287 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 07:17:25 crc kubenswrapper[5055]: I1011 07:17:25.376846 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 11 
07:17:25 crc kubenswrapper[5055]: I1011 07:17:25.385147 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 07:17:26 crc kubenswrapper[5055]: I1011 07:17:26.113585 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.757153 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"] Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.759734 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.781310 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"] Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.880618 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.880685 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpjlz\" (UniqueName: \"kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.880777 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.982394 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.982669 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpjlz\" (UniqueName: \"kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.982795 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.982949 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:29 crc kubenswrapper[5055]: I1011 07:17:29.983290 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:30 crc kubenswrapper[5055]: I1011 07:17:30.002144 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpjlz\" (UniqueName: \"kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz\") pod \"certified-operators-rv5w6\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") " pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:30 crc kubenswrapper[5055]: I1011 07:17:30.105447 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:30 crc kubenswrapper[5055]: I1011 07:17:30.611616 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"] Oct 11 07:17:31 crc kubenswrapper[5055]: I1011 07:17:31.154552 5055 generic.go:334] "Generic (PLEG): container finished" podID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerID="1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837" exitCode=0 Oct 11 07:17:31 crc kubenswrapper[5055]: I1011 07:17:31.154597 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerDied","Data":"1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837"} Oct 11 07:17:31 crc kubenswrapper[5055]: I1011 07:17:31.154622 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerStarted","Data":"51e13b90fba45535a09c9f60cbaea966939d586c6e3d3ac8a0becec23660294e"} Oct 11 07:17:32 crc kubenswrapper[5055]: I1011 07:17:32.164736 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerStarted","Data":"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"} Oct 11 07:17:32 crc kubenswrapper[5055]: I1011 07:17:32.422183 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:17:32 crc kubenswrapper[5055]: I1011 07:17:32.422669 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:17:33 crc kubenswrapper[5055]: I1011 07:17:33.174856 5055 generic.go:334] "Generic (PLEG): container finished" podID="137df884-34ff-4d8a-aa73-4f81e7f812e2" 
containerID="ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa" exitCode=0 Oct 11 07:17:33 crc kubenswrapper[5055]: I1011 07:17:33.174940 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerDied","Data":"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"} Oct 11 07:17:33 crc kubenswrapper[5055]: I1011 07:17:33.177342 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:17:34 crc kubenswrapper[5055]: I1011 07:17:34.184487 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerStarted","Data":"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"} Oct 11 07:17:34 crc kubenswrapper[5055]: I1011 07:17:34.205212 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rv5w6" podStartSLOduration=2.744532217 podStartE2EDuration="5.205197896s" podCreationTimestamp="2025-10-11 07:17:29 +0000 UTC" firstStartedPulling="2025-10-11 07:17:31.156540609 +0000 UTC m=+1434.930814426" lastFinishedPulling="2025-10-11 07:17:33.617206298 +0000 UTC m=+1437.391480105" observedRunningTime="2025-10-11 07:17:34.201415138 +0000 UTC m=+1437.975688945" watchObservedRunningTime="2025-10-11 07:17:34.205197896 +0000 UTC m=+1437.979471703" Oct 11 07:17:40 crc kubenswrapper[5055]: I1011 07:17:40.105878 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:40 crc kubenswrapper[5055]: I1011 07:17:40.106393 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:40 crc kubenswrapper[5055]: I1011 07:17:40.148420 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:40 crc kubenswrapper[5055]: I1011 07:17:40.283691 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rv5w6" Oct 11 07:17:40 crc kubenswrapper[5055]: I1011 07:17:40.383097 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"] Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.258207 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rv5w6" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="registry-server" containerID="cri-o://8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9" gracePeriod=2 Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.706561 5055 util.go:48] "No ready sandbox for pod can be found. 
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.803254 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content\") pod \"137df884-34ff-4d8a-aa73-4f81e7f812e2\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") "
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.803338 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpjlz\" (UniqueName: \"kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz\") pod \"137df884-34ff-4d8a-aa73-4f81e7f812e2\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") "
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.803373 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities\") pod \"137df884-34ff-4d8a-aa73-4f81e7f812e2\" (UID: \"137df884-34ff-4d8a-aa73-4f81e7f812e2\") "
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.804595 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities" (OuterVolumeSpecName: "utilities") pod "137df884-34ff-4d8a-aa73-4f81e7f812e2" (UID: "137df884-34ff-4d8a-aa73-4f81e7f812e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.808994 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz" (OuterVolumeSpecName: "kube-api-access-vpjlz") pod "137df884-34ff-4d8a-aa73-4f81e7f812e2" (UID: "137df884-34ff-4d8a-aa73-4f81e7f812e2"). InnerVolumeSpecName "kube-api-access-vpjlz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.851893 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "137df884-34ff-4d8a-aa73-4f81e7f812e2" (UID: "137df884-34ff-4d8a-aa73-4f81e7f812e2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.907254 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.907306 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpjlz\" (UniqueName: \"kubernetes.io/projected/137df884-34ff-4d8a-aa73-4f81e7f812e2-kube-api-access-vpjlz\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:42 crc kubenswrapper[5055]: I1011 07:17:42.907316 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/137df884-34ff-4d8a-aa73-4f81e7f812e2-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.272324 5055 generic.go:334] "Generic (PLEG): container finished" podID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerID="8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9" exitCode=0
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.272390 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rv5w6"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.272411 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerDied","Data":"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"}
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.272743 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rv5w6" event={"ID":"137df884-34ff-4d8a-aa73-4f81e7f812e2","Type":"ContainerDied","Data":"51e13b90fba45535a09c9f60cbaea966939d586c6e3d3ac8a0becec23660294e"}
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.272787 5055 scope.go:117] "RemoveContainer" containerID="8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.301559 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"]
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.303221 5055 scope.go:117] "RemoveContainer" containerID="ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.313038 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rv5w6"]
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.325105 5055 scope.go:117] "RemoveContainer" containerID="1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.399498 5055 scope.go:117] "RemoveContainer" containerID="8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"
Oct 11 07:17:43 crc kubenswrapper[5055]: E1011 07:17:43.399838 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9\": container with ID starting with 8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9 not found: ID does not exist" containerID="8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.399871 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9"} err="failed to get container status \"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9\": rpc error: code = NotFound desc = could not find container \"8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9\": container with ID starting with 8a864f365cf5ca0953a44523eb2b66c317ab51abf2689080a1dd89a1569f80f9 not found: ID does not exist"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.399891 5055 scope.go:117] "RemoveContainer" containerID="ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"
Oct 11 07:17:43 crc kubenswrapper[5055]: E1011 07:17:43.400070 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa\": container with ID starting with ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa not found: ID does not exist" containerID="ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.400100 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa"} err="failed to get container status \"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa\": rpc error: code = NotFound desc = could not find container \"ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa\": container with ID starting with ecc1363b7c132e5fb1361098b8f06274a75ebb036a6064d2eab6f3def7a7f7aa not found: ID does not exist"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.400112 5055 scope.go:117] "RemoveContainer" containerID="1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837"
Oct 11 07:17:43 crc kubenswrapper[5055]: E1011 07:17:43.400552 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837\": container with ID starting with 1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837 not found: ID does not exist" containerID="1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837"
Oct 11 07:17:43 crc kubenswrapper[5055]: I1011 07:17:43.400604 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837"} err="failed to get container status \"1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837\": rpc error: code = NotFound desc = could not find container \"1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837\": container with ID starting with 1604f8bddff9d8245ea4f3a3e5e8cd82000c809f1cb02e4d238abe58973de837 not found: ID does not exist"
Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.004600 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" path="/var/lib/kubelet/pods/137df884-34ff-4d8a-aa73-4f81e7f812e2/volumes"
Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.592111 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.592328 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerName="openstackclient" containerID="cri-o://717aceea1665af2d47ebfdceb4d8f53a94f23eb3af20babe5546bf58c47b4764" gracePeriod=2
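
The repeated "ContainerStatus from runtime service failed ... NotFound" errors above are benign: the containers were already removed, and the kubelet treats a gRPC NotFound on a delete path as success. A hedged sketch of that idempotency check (the remove callback is hypothetical; only the status/codes calls are real gRPC APIs):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIgnoringNotFound wraps a hypothetical CRI-backed remove call and
// swallows gRPC NotFound, mirroring how the kubelet above keeps going when
// the container it wants to delete no longer exists.
func removeIgnoringNotFound(remove func(id string) error, id string) error {
	if err := remove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already removed; treating delete as done\n", id)
			return nil
		}
		return err
	}
	return nil
}
```
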
pod="openstack/openstackclient" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerName="openstackclient" containerID="cri-o://717aceea1665af2d47ebfdceb4d8f53a94f23eb3af20babe5546bf58c47b4764" gracePeriod=2 Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.609309 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.843422 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.900168 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.900943 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="openstack-network-exporter" containerID="cri-o://0f67a32ae0f476c9f1b1e19bc12c766c0df90a9fc5070dfdfc272f9a4725310f" gracePeriod=300 Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.923839 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.924375 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerName="openstackclient" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924399 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerName="openstackclient" Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.924441 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="extract-content" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924450 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="extract-content" Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.924473 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="registry-server" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924481 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="registry-server" Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.924491 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="extract-utilities" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924499 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="extract-utilities" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924726 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerName="openstackclient" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.924780 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="137df884-34ff-4d8a-aa73-4f81e7f812e2" containerName="registry-server" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.925560 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.955538 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.966873 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:45 crc kubenswrapper[5055]: E1011 07:17:45.966932 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data podName:6baacc00-a270-4662-ba67-aad18287df2c nodeName:}" failed. No retries permitted until 2025-10-11 07:17:46.466915176 +0000 UTC m=+1450.241188983 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data") pod "rabbitmq-cell1-server-0" (UID: "6baacc00-a270-4662-ba67-aad18287df2c") : configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:45 crc kubenswrapper[5055]: I1011 07:17:45.978332 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.010173 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.065342 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mglh\" (UniqueName: \"kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh\") pod \"placementd9e7-account-delete-jsjrs\" (UID: \"f99e172e-068d-423a-b797-2467e352a41b\") " pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.112618 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.124150 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="ovsdbserver-nb" containerID="cri-o://592f9a3bef2915f38f89bd3def34fbbcb1bece166f2fe29ce499fa62c6995ab3" gracePeriod=300 Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.146273 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.161213 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.168295 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mglh\" (UniqueName: \"kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh\") pod \"placementd9e7-account-delete-jsjrs\" (UID: \"f99e172e-068d-423a-b797-2467e352a41b\") " pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.170652 5055 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-2l5hh" message=< Oct 11 07:17:46 crc kubenswrapper[5055]: Exiting ovn-controller (1) [ OK ] Oct 11 07:17:46 crc kubenswrapper[5055]: > Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.170677 5055 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-2l5hh" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" containerID="cri-o://027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.170703 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-2l5hh" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" containerID="cri-o://027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3" gracePeriod=30 Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.170834 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.170871 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data podName:80597a79-e3fd-41cd-b035-a35494775fcb nodeName:}" failed. No retries permitted until 2025-10-11 07:17:46.670856769 +0000 UTC m=+1450.445130566 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data") pod "rabbitmq-server-0" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb") : configmap "rabbitmq-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.179278 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.235395 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.235760 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mglh\" (UniqueName: \"kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh\") pod \"placementd9e7-account-delete-jsjrs\" (UID: \"f99e172e-068d-423a-b797-2467e352a41b\") " pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.236599 5055 util.go:30] "No sandbox for pod can be found. 
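
The ovn-controller failure above is a PreStop exec hook that was still running when the runtime force-killed it (exit code 137 = SIGKILL). A sketch of the container definition implied by the log, written with k8s.io/api types (field names per recent API versions, where the handler type is LifecycleHandler; the image is an assumption, only the command comes from the log):

```go
package main

import corev1 "k8s.io/api/core/v1"

// ovnController sketches a container with the PreStop hook traced above.
var ovnController = corev1.Container{
	Name:  "ovn-controller",
	Image: "example.invalid/ovn-controller:latest", // assumed; not in the log
	Lifecycle: &corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			Exec: &corev1.ExecAction{
				// Command taken verbatim from the failed hook above.
				Command: []string{"/usr/share/ovn/scripts/ovn-ctl", "stop_controller"},
			},
		},
	},
}
```
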
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.259961 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.271356 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtnhh\" (UniqueName: \"kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh\") pod \"cinder0232-account-delete-6nzbr\" (UID: \"c647ccff-95d1-467a-9fcd-4b5110772361\") " pod="openstack/cinder0232-account-delete-6nzbr"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.271417 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2bnj\" (UniqueName: \"kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj\") pod \"barbican1232-account-delete-ct7zg\" (UID: \"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7\") " pod="openstack/barbican1232-account-delete-ct7zg"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.271615 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementd9e7-account-delete-jsjrs"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.292955 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.293611 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-p2xsl" podUID="beb2c0e8-4291-454c-aae1-28c186965761" containerName="openstack-network-exporter" containerID="cri-o://f24f414c261ba713dd02a74d75b54a85809df878427bfe504f61940fd8a374e3" gracePeriod=30
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.364551 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5a2fd839-7187-4fce-be78-2a911103f8cc/ovsdbserver-nb/0.log"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.364597 5055 generic.go:334] "Generic (PLEG): container finished" podID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerID="0f67a32ae0f476c9f1b1e19bc12c766c0df90a9fc5070dfdfc272f9a4725310f" exitCode=2
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.364616 5055 generic.go:334] "Generic (PLEG): container finished" podID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerID="592f9a3bef2915f38f89bd3def34fbbcb1bece166f2fe29ce499fa62c6995ab3" exitCode=143
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.364635 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerDied","Data":"0f67a32ae0f476c9f1b1e19bc12c766c0df90a9fc5070dfdfc272f9a4725310f"}
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.364666 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerDied","Data":"592f9a3bef2915f38f89bd3def34fbbcb1bece166f2fe29ce499fa62c6995ab3"}
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.372606 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtnhh\" (UniqueName: \"kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh\") pod \"cinder0232-account-delete-6nzbr\" (UID: \"c647ccff-95d1-467a-9fcd-4b5110772361\") " pod="openstack/cinder0232-account-delete-6nzbr"
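
A note on the exit codes just recorded: values above 128 encode 128 plus the fatal signal, so exitCode=143 is SIGTERM (the normal graceful stop) and the 137s elsewhere in this log are SIGKILL; smaller values, like the exporter's exitCode=2, are the process's own exit status. A trivial decoder to make that convention explicit:

```go
package main

import "fmt"

// describeExit decodes container exit codes using the 128+signal convention.
func describeExit(code int) string {
	if code > 128 {
		return fmt.Sprintf("terminated by signal %d", code-128)
	}
	return fmt.Sprintf("exited with status %d", code)
}

func main() {
	for _, c := range []int{0, 2, 137, 143} {
		fmt.Printf("exitCode=%d: %s\n", c, describeExit(c))
	}
}
```
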
pod="openstack/cinder0232-account-delete-6nzbr" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.372674 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2bnj\" (UniqueName: \"kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj\") pod \"barbican1232-account-delete-ct7zg\" (UID: \"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7\") " pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.391205 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-6lhht"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.403636 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2bnj\" (UniqueName: \"kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj\") pod \"barbican1232-account-delete-ct7zg\" (UID: \"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7\") " pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.410788 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtnhh\" (UniqueName: \"kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh\") pod \"cinder0232-account-delete-6nzbr\" (UID: \"c647ccff-95d1-467a-9fcd-4b5110772361\") " pod="openstack/cinder0232-account-delete-6nzbr" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.422984 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-6lhht"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.437818 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-bk7mn"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.440628 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-bk7mn"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.466547 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.467708 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance5ef3-account-delete-px4qv" Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.477637 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.477706 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data podName:6baacc00-a270-4662-ba67-aad18287df2c nodeName:}" failed. No retries permitted until 2025-10-11 07:17:47.477692641 +0000 UTC m=+1451.251966448 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data") pod "rabbitmq-cell1-server-0" (UID: "6baacc00-a270-4662-ba67-aad18287df2c") : configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.481337 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.481586 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd" containerID="cri-o://85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" gracePeriod=30 Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.481708 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="openstack-network-exporter" containerID="cri-o://5da9581c5841f93d450f53683dcf5e30ef31a1b137be5b1ebde5cd58b90187ab" gracePeriod=30 Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.492977 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.522458 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-hjxfj"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.568946 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.624797 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-hjxfj"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.660224 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0232-account-delete-6nzbr" Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.690324 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpwcq\" (UniqueName: \"kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq\") pod \"glance5ef3-account-delete-px4qv\" (UID: \"f360ed99-6efd-4281-a18b-7597df5341b6\") " pod="openstack/glance5ef3-account-delete-px4qv" Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.690519 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: E1011 07:17:46.690576 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data podName:80597a79-e3fd-41cd-b035-a35494775fcb nodeName:}" failed. No retries permitted until 2025-10-11 07:17:47.690557239 +0000 UTC m=+1451.464831046 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data") pod "rabbitmq-server-0" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb") : configmap "rabbitmq-config-data" not found Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.696348 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"] Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.699073 5055 util.go:30] "No sandbox for pod can be found. 
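
The rabbitmq config-data mounts keep failing because their ConfigMaps were already deleted, and the "durationBeforeRetry" values double on each attempt (500ms earlier, 1s here, 2s later in this log). A hedged sketch of that doubling retry using the real wait.ExponentialBackoff helper from k8s.io/apimachinery (mountConfigData is a hypothetical stand-in for the mount operation):

```go
package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// mountWithBackoff retries a mount-like operation with the delays visible in
// the log: 500ms, then 1s, then 2s, and so on.
func mountWithBackoff(mountConfigData func() error) error {
	backoff := wait.Backoff{
		Duration: 500 * time.Millisecond, // first durationBeforeRetry above
		Factor:   2.0,                    // 500ms -> 1s -> 2s -> ...
		Steps:    5,
	}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		if err := mountConfigData(); err != nil {
			return false, nil // configmap may not exist yet; retry later
		}
		return true, nil
	})
}
```
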
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.782822 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.797616 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4ptm\" (UniqueName: \"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm\") pod \"novacell14c23-account-delete-sgt68\" (UID: \"acf7edb2-ebde-4783-b428-b6ce809be9f9\") " pod="openstack/novacell14c23-account-delete-sgt68"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.797671 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpwcq\" (UniqueName: \"kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq\") pod \"glance5ef3-account-delete-px4qv\" (UID: \"f360ed99-6efd-4281-a18b-7597df5341b6\") " pod="openstack/glance5ef3-account-delete-px4qv"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.821830 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.823148 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi9573-account-delete-4cwkv"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.832347 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpwcq\" (UniqueName: \"kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq\") pod \"glance5ef3-account-delete-px4qv\" (UID: \"f360ed99-6efd-4281-a18b-7597df5341b6\") " pod="openstack/glance5ef3-account-delete-px4qv"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.870114 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.896827 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0372a-account-delete-68qnz"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.898121 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0372a-account-delete-68qnz"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.900472 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4ptm\" (UniqueName: \"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm\") pod \"novacell14c23-account-delete-sgt68\" (UID: \"acf7edb2-ebde-4783-b428-b6ce809be9f9\") " pod="openstack/novacell14c23-account-delete-sgt68"
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.928089 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0372a-account-delete-68qnz"]
Oct 11 07:17:46 crc kubenswrapper[5055]: I1011 07:17:46.947874 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4ptm\" (UniqueName: \"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm\") pod \"novacell14c23-account-delete-sgt68\" (UID: \"acf7edb2-ebde-4783-b428-b6ce809be9f9\") " pod="openstack/novacell14c23-account-delete-sgt68"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.001831 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zc4n\" (UniqueName: \"kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n\") pod \"novaapi9573-account-delete-4cwkv\" (UID: \"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241\") " pod="openstack/novaapi9573-account-delete-4cwkv"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.001981 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fpdr\" (UniqueName: \"kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr\") pod \"novacell0372a-account-delete-68qnz\" (UID: \"01e71e0c-56f0-4bfc-bd58-501785a5d235\") " pod="openstack/novacell0372a-account-delete-68qnz"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.058256 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance5ef3-account-delete-px4qv"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.060153 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11f3f545-5226-44e9-956b-1c79012e5a74" path="/var/lib/kubelet/pods/11f3f545-5226-44e9-956b-1c79012e5a74/volumes"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.084357 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c34e566-ca8a-4c34-a24e-5401cf63666b" path="/var/lib/kubelet/pods/4c34e566-ca8a-4c34-a24e-5401cf63666b/volumes"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.105580 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ba129d1-5c6b-41a7-9a7f-d469ad919b75" path="/var/lib/kubelet/pods/6ba129d1-5c6b-41a7-9a7f-d469ad919b75/volumes"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.112823 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fpdr\" (UniqueName: \"kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr\") pod \"novacell0372a-account-delete-68qnz\" (UID: \"01e71e0c-56f0-4bfc-bd58-501785a5d235\") " pod="openstack/novacell0372a-account-delete-68qnz"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.112958 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zc4n\" (UniqueName: \"kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n\") pod \"novaapi9573-account-delete-4cwkv\" (UID: \"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241\") " pod="openstack/novaapi9573-account-delete-4cwkv"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.116383 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell14c23-account-delete-sgt68"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.119798 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.121515 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="dnsmasq-dns" containerID="cri-o://d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8" gracePeriod=10
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.198375 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zc4n\" (UniqueName: \"kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n\") pod \"novaapi9573-account-delete-4cwkv\" (UID: \"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241\") " pod="openstack/novaapi9573-account-delete-4cwkv"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.210404 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fpdr\" (UniqueName: \"kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr\") pod \"novacell0372a-account-delete-68qnz\" (UID: \"01e71e0c-56f0-4bfc-bd58-501785a5d235\") " pod="openstack/novacell0372a-account-delete-68qnz"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.239524 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0372a-account-delete-68qnz"
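
The "Cleaned up orphaned pod volumes dir" entries above always target the same layout under the kubelet root. A tiny sketch that reconstructs those paths (the /var/lib/kubelet root is the kubelet default and is assumed here):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// podVolumesDir rebuilds the per-pod volumes directory the kubelet garbage-
// collects once a deleted pod's volumes are fully unmounted.
func podVolumesDir(podUID string) string {
	return filepath.Join("/var/lib/kubelet", "pods", podUID, "volumes")
}

func main() {
	fmt.Println(podVolumesDir("11f3f545-5226-44e9-956b-1c79012e5a74"))
	// /var/lib/kubelet/pods/11f3f545-5226-44e9-956b-1c79012e5a74/volumes
}
```
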
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.242602 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-jb7ms"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.286847 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-jb7ms"]
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.313181 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.317141 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.317986 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="openstack-network-exporter" containerID="cri-o://4150f4280b66ed07a18507fa2549cb34d90ba3d5f369125336c5e32bea64bc9a" gracePeriod=300
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.375913 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.381244 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9f95b955d-vptlr"]
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.381888 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.382884 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.383322 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-9f95b955d-vptlr" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-log" containerID="cri-o://8d5ba7c659e482e9feef4958452dbc1a0c5e75bab6d21a07960f5948ca9ca733" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.383985 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-9f95b955d-vptlr" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-api" containerID="cri-o://82a484cebdaeca44b956dd6026c1a5e65bf7f6fa2ec01d1293e086f33d6250f6" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.405833 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wjshb"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.416903 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wjshb"]
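
The ExecSync errors above come from the readiness probe still firing while ovn-northd is terminating: CRI-O refuses to register new exec PIDs in a stopping container, so the probe errors out and the kubelet logs "Probe errored". A sketch of the probe definition implied by the log, using k8s.io/api types (only the command is from the log; the handler type name follows recent API versions, and the timing fields are assumptions):

```go
package main

import corev1 "k8s.io/api/core/v1"

// readiness sketches the exec readiness probe whose failures are logged above.
var readiness = &corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		Exec: &corev1.ExecAction{
			Command: []string{"/usr/local/bin/container-scripts/status_check.sh"},
		},
	},
	PeriodSeconds:    10, // assumed; the log does not show the period
	FailureThreshold: 3,  // assumed
}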
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.430299 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh6wt"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.434073 5055 generic.go:334] "Generic (PLEG): container finished" podID="51322291-81d0-4cbc-a761-0294a8365fd3" containerID="027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3" exitCode=0
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.434123 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh" event={"ID":"51322291-81d0-4cbc-a761-0294a8365fd3","Type":"ContainerDied","Data":"027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3"}
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.450911 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-bh6wt"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.451328 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi9573-account-delete-4cwkv"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.452573 5055 generic.go:334] "Generic (PLEG): container finished" podID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerID="5da9581c5841f93d450f53683dcf5e30ef31a1b137be5b1ebde5cd58b90187ab" exitCode=2
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.452622 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerDied","Data":"5da9581c5841f93d450f53683dcf5e30ef31a1b137be5b1ebde5cd58b90187ab"}
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.464644 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-6x24c"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.472881 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-6x24c"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.502065 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-p2xsl_beb2c0e8-4291-454c-aae1-28c186965761/openstack-network-exporter/0.log"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.502115 5055 generic.go:334] "Generic (PLEG): container finished" podID="beb2c0e8-4291-454c-aae1-28c186965761" containerID="f24f414c261ba713dd02a74d75b54a85809df878427bfe504f61940fd8a374e3" exitCode=2
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.502149 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p2xsl" event={"ID":"beb2c0e8-4291-454c-aae1-28c186965761","Type":"ContainerDied","Data":"f24f414c261ba713dd02a74d75b54a85809df878427bfe504f61940fd8a374e3"}
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.523632 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.523742 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data podName:6baacc00-a270-4662-ba67-aad18287df2c nodeName:}" failed. No retries permitted until 2025-10-11 07:17:49.523725428 +0000 UTC m=+1453.297999235 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data") pod "rabbitmq-cell1-server-0" (UID: "6baacc00-a270-4662-ba67-aad18287df2c") : configmap "rabbitmq-cell1-config-data" not found
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.525347 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5a2fd839-7187-4fce-be78-2a911103f8cc/ovsdbserver-nb/0.log"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.525408 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.538934 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="ovsdbserver-sb" containerID="cri-o://0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" gracePeriod=300
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.585000 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" containerID="cri-o://412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" gracePeriod=29
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.585026 5055 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Oct 11 07:17:47 crc kubenswrapper[5055]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Oct 11 07:17:47 crc kubenswrapper[5055]: + source /usr/local/bin/container-scripts/functions
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNBridge=br-int
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNRemote=tcp:localhost:6642
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNEncapType=geneve
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNAvailabilityZones=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ EnableChassisAsGateway=true
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ PhysicalNetworks=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNHostName=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ DB_FILE=/etc/openvswitch/conf.db
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ ovs_dir=/var/lib/openvswitch
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + cleanup_ovsdb_server_semaphore
Oct 11 07:17:47 crc kubenswrapper[5055]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Oct 11 07:17:47 crc kubenswrapper[5055]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-5t8kh" message=<
Oct 11 07:17:47 crc kubenswrapper[5055]: Exiting ovsdb-server (5) [ OK ]
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Oct 11 07:17:47 crc kubenswrapper[5055]: + source /usr/local/bin/container-scripts/functions
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNBridge=br-int
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNRemote=tcp:localhost:6642
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNEncapType=geneve
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNAvailabilityZones=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ EnableChassisAsGateway=true
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ PhysicalNetworks=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNHostName=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ DB_FILE=/etc/openvswitch/conf.db
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ ovs_dir=/var/lib/openvswitch
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + cleanup_ovsdb_server_semaphore
Oct 11 07:17:47 crc kubenswrapper[5055]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Oct 11 07:17:47 crc kubenswrapper[5055]: >
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.585061 5055 kuberuntime_container.go:691] "PreStop hook failed" err=<
Oct 11 07:17:47 crc kubenswrapper[5055]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Oct 11 07:17:47 crc kubenswrapper[5055]: + source /usr/local/bin/container-scripts/functions
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNBridge=br-int
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNRemote=tcp:localhost:6642
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNEncapType=geneve
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNAvailabilityZones=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ EnableChassisAsGateway=true
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ PhysicalNetworks=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ OVNHostName=
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ DB_FILE=/etc/openvswitch/conf.db
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ ovs_dir=/var/lib/openvswitch
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Oct 11 07:17:47 crc kubenswrapper[5055]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + sleep 0.5
Oct 11 07:17:47 crc kubenswrapper[5055]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Oct 11 07:17:47 crc kubenswrapper[5055]: + cleanup_ovsdb_server_semaphore
Oct 11 07:17:47 crc kubenswrapper[5055]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Oct 11 07:17:47 crc kubenswrapper[5055]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Oct 11 07:17:47 crc kubenswrapper[5055]: > pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" containerID="cri-o://9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.585098 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" containerID="cri-o://9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" gracePeriod=29
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.600339 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2l5hh"
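
The stop-ovsdb-server.sh trace above is a semaphore wait: the hook polls for a "safe to stop" marker file before stopping ovsdb-server, and is SIGKILLed (exit 137) when the grace period runs out first. A sketch of that logic in Go, under the assumptions noted in the comments (the semaphore path comes from the trace; stopServer stands in for `ovs-ctl stop --no-ovs-vswitchd`):

```go
package main

import (
	"os"
	"time"
)

const semaphore = "/var/lib/openvswitch/is_safe_to_stop_ovsdb_server"

// stopWhenSafe mirrors the traced shell loop: wait for the semaphore file,
// remove it, then stop the server.
func stopWhenSafe(stopServer func() error) error {
	for {
		if _, err := os.Stat(semaphore); err == nil {
			break // marker exists: vswitchd has finished saving flows
		}
		time.Sleep(500 * time.Millisecond) // mirrors `sleep 0.5`
	}
	_ = os.Remove(semaphore) // cleanup_ovsdb_server_semaphore
	return stopServer()
}
```
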
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624424 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624473 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624535 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624571 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624653 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbzp9\" (UniqueName: \"kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624671 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624700 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.624749 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts\") pod \"5a2fd839-7187-4fce-be78-2a911103f8cc\" (UID: \"5a2fd839-7187-4fce-be78-2a911103f8cc\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.625987 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config" (OuterVolumeSpecName: "config") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.627314 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.627708 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.627728 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts" (OuterVolumeSpecName: "scripts") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.627957 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="cinder-scheduler" containerID="cri-o://24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.628065 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="probe" containerID="cri-o://1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.643166 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.661727 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9" (OuterVolumeSpecName: "kube-api-access-qbzp9") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "kube-api-access-qbzp9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.692853 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-p2xsl_beb2c0e8-4291-454c-aae1-28c186965761/openstack-network-exporter/0.log"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.692930 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-p2xsl"
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726338 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726616 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726686 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726747 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.727112 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgvsl\" (UniqueName: \"kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.727193 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.727305 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts\") pod \"51322291-81d0-4cbc-a761-0294a8365fd3\" (UID: \"51322291-81d0-4cbc-a761-0294a8365fd3\") "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.728015 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbzp9\" (UniqueName: \"kubernetes.io/projected/5a2fd839-7187-4fce-be78-2a911103f8cc-kube-api-access-qbzp9\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.728108 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.728165 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdb-rundir\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.728216 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-scripts\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.728343 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a2fd839-7187-4fce-be78-2a911103f8cc-config\") on node \"crc\" DevicePath \"\""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726860 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run" (OuterVolumeSpecName: "var-run") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726879 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.726902 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.729891 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Oct 11 07:17:47 crc kubenswrapper[5055]: E1011 07:17:47.730236 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data podName:80597a79-e3fd-41cd-b035-a35494775fcb nodeName:}" failed. No retries permitted until 2025-10-11 07:17:49.730217744 +0000 UTC m=+1453.504491551 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data") pod "rabbitmq-server-0" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb") : configmap "rabbitmq-config-data" not found
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.732585 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts" (OuterVolumeSpecName: "scripts") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.791805 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.792560 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-server" containerID="cri-o://c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.793419 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="swift-recon-cron" containerID="cri-o://06502cb3633b89b873682e6009f7ef8a153676dfb10f0635ff187c1ee1f7afb6" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.796501 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="rsync" containerID="cri-o://ec154ac9d6d4c663a8d3edda9d0ce209dd40a1836be6f8424d6593198e1668ad" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.796893 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-expirer" containerID="cri-o://f3423b44a9f0fea83f6193fc9009ffe5ed847e103301a15f805c18dd8d170f9e" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.797028 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-updater" containerID="cri-o://ccd2a2a490675a60a807b626b3856bccab72588539a3eb4bd4bf5dcf6d915355" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.797149 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-auditor" containerID="cri-o://98a3bd9053451cb0809632f4b67e138c0b75c3d90227f09fcbaeb9c60c6b6bb6" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.797669 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-replicator" containerID="cri-o://99e83bf3a8eb2b7367fbf738da227780a1807885d63f0d0e27ddf82536f1f23f" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798312 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-server" containerID="cri-o://e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b" gracePeriod=30
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798310 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl" (OuterVolumeSpecName: "kube-api-access-bgvsl") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "kube-api-access-bgvsl".
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798549 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-updater" containerID="cri-o://29c5e0db8a9d4f0f2dc6b72ae8af6d9ada7256824853969407ab923a9710b4ca" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798669 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-auditor" containerID="cri-o://52170fd074acd4d0b541fe66faa6dbfd0a1f3e12dbda56b05f7ebf193e4489b4" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798815 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-replicator" containerID="cri-o://07ab4219d9bf5b7769a004dfff97e17520e92f8e30791238e6e136cf29d5a8e7" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.798919 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-server" containerID="cri-o://e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.799011 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-reaper" containerID="cri-o://b49bbf96e1ad1276ddfb0167a4e342e71a2d3248b8dd30e171b623fe16b3e5b3" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.799102 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-auditor" containerID="cri-o://169a3663bf7e265b73d12b362a875432f15995d044bb153d925d7a1f2aded521" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.799173 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-replicator" containerID="cri-o://56af820548674816735daf22fa48f4a5a08266e6d8ffee8ee41bf2dc35f87bc1" gracePeriod=30 Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.816308 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.826147 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.829976 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.830846 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccp9v\" (UniqueName: \"kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.830982 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831062 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831140 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831205 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831239 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir\") pod \"beb2c0e8-4291-454c-aae1-28c186965761\" (UID: \"beb2c0e8-4291-454c-aae1-28c186965761\") " Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831737 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831759 5055 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831785 5055 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831794 5055 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/51322291-81d0-4cbc-a761-0294a8365fd3-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831806 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgvsl\" (UniqueName: \"kubernetes.io/projected/51322291-81d0-4cbc-a761-0294a8365fd3-kube-api-access-bgvsl\") on node \"crc\" DevicePath \"\"" 
Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831819 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831831 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51322291-81d0-4cbc-a761-0294a8365fd3-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.831843 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.832055 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.832327 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.842398 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config" (OuterVolumeSpecName: "config") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.871021 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-zkpgz"] Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.940537 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.949796 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.949841 5055 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovs-rundir\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.949855 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/beb2c0e8-4291-454c-aae1-28c186965761-ovn-rundir\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.949869 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beb2c0e8-4291-454c-aae1-28c186965761-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:47 crc kubenswrapper[5055]: I1011 07:17:47.971270 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-zkpgz"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.032706 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v" (OuterVolumeSpecName: "kube-api-access-ccp9v") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "kube-api-access-ccp9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.081164 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccp9v\" (UniqueName: \"kubernetes.io/projected/beb2c0e8-4291-454c-aae1-28c186965761-kube-api-access-ccp9v\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.145646 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "5a2fd839-7187-4fce-be78-2a911103f8cc" (UID: "5a2fd839-7187-4fce-be78-2a911103f8cc"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.196289 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a2fd839-7187-4fce-be78-2a911103f8cc-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.212746 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.213180 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api-log" containerID="cri-o://3d5aad7ccd6e2d170e5b68480b570aef7d997fdd46672f0bca2c424d9434d752" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.213334 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api" containerID="cri-o://ce9f9d6bb4b08df1523515f638c8c0fd3d6d8d86faabc0808907511f9e7af1e3" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.236051 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.265941 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.293028 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-ktc4n"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.308408 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.310119 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.310189 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58d4f6c497-jm75b" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-api" containerID="cri-o://06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.310267 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58d4f6c497-jm75b" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-httpd" containerID="cri-o://1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.320107 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-ktc4n"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.366329 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-d9e7-account-create-lgw6n"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.395322 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.406188 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "beb2c0e8-4291-454c-aae1-28c186965761" (UID: "beb2c0e8-4291-454c-aae1-28c186965761"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.407388 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.408859 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-d9e7-account-create-lgw6n"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.411435 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/beb2c0e8-4291-454c-aae1-28c186965761-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.415877 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "51322291-81d0-4cbc-a761-0294a8365fd3" (UID: "51322291-81d0-4cbc-a761-0294a8365fd3"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.418170 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="6baacc00-a270-4662-ba67-aad18287df2c" containerName="rabbitmq" containerID="cri-o://afc6ef0c5aad5fc6c62e67db33d9a72dc22d3677a055603cfeadcd9d74609515" gracePeriod=604800 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.434833 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-s265b"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.459829 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-s265b"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.481495 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b491-account-create-rmjgx"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.489738 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-rwbjm"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.503511 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-0232-account-create-rnfzp"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512439 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512538 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512640 
5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512664 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512735 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8xzf\" (UniqueName: \"kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.512827 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0\") pod \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\" (UID: \"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.513229 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/51322291-81d0-4cbc-a761-0294a8365fd3-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.522242 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.532825 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-rwbjm"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.542838 5055 generic.go:334] "Generic (PLEG): container finished" podID="491a98fd-dad0-4515-a217-c01bd4aa741d" containerID="717aceea1665af2d47ebfdceb4d8f53a94f23eb3af20babe5546bf58c47b4764" exitCode=137 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.545707 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf" (OuterVolumeSpecName: "kube-api-access-k8xzf") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "kube-api-access-k8xzf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.549840 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-p2xsl_beb2c0e8-4291-454c-aae1-28c186965761/openstack-network-exporter/0.log" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.549901 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p2xsl" event={"ID":"beb2c0e8-4291-454c-aae1-28c186965761","Type":"ContainerDied","Data":"cb83d0fb3a7d29f9fb8ef3b4abb6f76d95731049ea56fa1c540c2bbfbcb54d98"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.549938 5055 scope.go:117] "RemoveContainer" containerID="f24f414c261ba713dd02a74d75b54a85809df878427bfe504f61940fd8a374e3" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.550043 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-p2xsl" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.581587 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b491-account-create-rmjgx"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.624872 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-xsnj4"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.626190 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8xzf\" (UniqueName: \"kubernetes.io/projected/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-kube-api-access-k8xzf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632501 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="ec154ac9d6d4c663a8d3edda9d0ce209dd40a1836be6f8424d6593198e1668ad" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632533 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="f3423b44a9f0fea83f6193fc9009ffe5ed847e103301a15f805c18dd8d170f9e" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632542 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="ccd2a2a490675a60a807b626b3856bccab72588539a3eb4bd4bf5dcf6d915355" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632548 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="98a3bd9053451cb0809632f4b67e138c0b75c3d90227f09fcbaeb9c60c6b6bb6" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632554 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="99e83bf3a8eb2b7367fbf738da227780a1807885d63f0d0e27ddf82536f1f23f" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632560 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="29c5e0db8a9d4f0f2dc6b72ae8af6d9ada7256824853969407ab923a9710b4ca" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632566 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="52170fd074acd4d0b541fe66faa6dbfd0a1f3e12dbda56b05f7ebf193e4489b4" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632572 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="07ab4219d9bf5b7769a004dfff97e17520e92f8e30791238e6e136cf29d5a8e7" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632578 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="b49bbf96e1ad1276ddfb0167a4e342e71a2d3248b8dd30e171b623fe16b3e5b3" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632584 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="169a3663bf7e265b73d12b362a875432f15995d044bb153d925d7a1f2aded521" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632590 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="56af820548674816735daf22fa48f4a5a08266e6d8ffee8ee41bf2dc35f87bc1" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 
07:17:48.632627 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"ec154ac9d6d4c663a8d3edda9d0ce209dd40a1836be6f8424d6593198e1668ad"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632652 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"f3423b44a9f0fea83f6193fc9009ffe5ed847e103301a15f805c18dd8d170f9e"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632664 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"ccd2a2a490675a60a807b626b3856bccab72588539a3eb4bd4bf5dcf6d915355"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632673 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"98a3bd9053451cb0809632f4b67e138c0b75c3d90227f09fcbaeb9c60c6b6bb6"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632681 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"99e83bf3a8eb2b7367fbf738da227780a1807885d63f0d0e27ddf82536f1f23f"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632689 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"29c5e0db8a9d4f0f2dc6b72ae8af6d9ada7256824853969407ab923a9710b4ca"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632697 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"52170fd074acd4d0b541fe66faa6dbfd0a1f3e12dbda56b05f7ebf193e4489b4"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632706 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"07ab4219d9bf5b7769a004dfff97e17520e92f8e30791238e6e136cf29d5a8e7"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632715 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"b49bbf96e1ad1276ddfb0167a4e342e71a2d3248b8dd30e171b623fe16b3e5b3"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632723 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"169a3663bf7e265b73d12b362a875432f15995d044bb153d925d7a1f2aded521"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.632733 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"56af820548674816735daf22fa48f4a5a08266e6d8ffee8ee41bf2dc35f87bc1"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.657737 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-0232-account-create-rnfzp"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.661243 5055 generic.go:334] "Generic (PLEG): container finished" 
podID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerID="8d5ba7c659e482e9feef4958452dbc1a0c5e75bab6d21a07960f5948ca9ca733" exitCode=143 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.661413 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerDied","Data":"8d5ba7c659e482e9feef4958452dbc1a0c5e75bab6d21a07960f5948ca9ca733"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.667885 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-xsnj4"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.672113 5055 generic.go:334] "Generic (PLEG): container finished" podID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerID="d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.672217 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.672206 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" event={"ID":"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac","Type":"ContainerDied","Data":"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.672253 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d4d96bb9-xs84k" event={"ID":"e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac","Type":"ContainerDied","Data":"fa70a8879a5b484a5d340dad79d572b10ce3dd35a64daf5f25124cf0875d44b6"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.672271 5055 scope.go:117] "RemoveContainer" containerID="d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.674521 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.680169 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.681778 5055 generic.go:334] "Generic (PLEG): container finished" podID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerID="3d5aad7ccd6e2d170e5b68480b570aef7d997fdd46672f0bca2c424d9434d752" exitCode=143 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.681830 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerDied","Data":"3d5aad7ccd6e2d170e5b68480b570aef7d997fdd46672f0bca2c424d9434d752"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.691285 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2l5hh" event={"ID":"51322291-81d0-4cbc-a761-0294a8365fd3","Type":"ContainerDied","Data":"24aa56eae5db123d03cb69ec134c4277630f172a76f2d2449612cabd90bd71a6"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.691367 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2l5hh" Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.694745 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234 is running failed: container process not found" containerID="0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.695209 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234 is running failed: container process not found" containerID="0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.697197 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234 is running failed: container process not found" containerID="0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" cmd=["/usr/bin/pidof","ovsdb-server"] Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.697249 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234 is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="ovsdbserver-sb" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.698021 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_728d8a1e-9c94-49ce-94f4-491bf34a9b16/ovsdbserver-sb/0.log" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.698059 5055 generic.go:334] "Generic (PLEG): container finished" podID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerID="4150f4280b66ed07a18507fa2549cb34d90ba3d5f369125336c5e32bea64bc9a" exitCode=2 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.698073 5055 generic.go:334] "Generic (PLEG): container finished" podID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerID="0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" exitCode=143 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.698142 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerDied","Data":"4150f4280b66ed07a18507fa2549cb34d90ba3d5f369125336c5e32bea64bc9a"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.698189 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerDied","Data":"0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.712430 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1232-account-create-xtcnm"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.728216 5055 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.731436 5055 scope.go:117] "RemoveContainer" containerID="33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.735172 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementd9e7-account-delete-jsjrs" event={"ID":"f99e172e-068d-423a-b797-2467e352a41b","Type":"ContainerStarted","Data":"8511f64421163b3753c44062369411c09fc6521d3801027ea6498709bdd2e7a7"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.738369 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.739247 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5a2fd839-7187-4fce-be78-2a911103f8cc/ovsdbserver-nb/0.log" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.739303 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5a2fd839-7187-4fce-be78-2a911103f8cc","Type":"ContainerDied","Data":"06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.739387 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.747539 5055 generic.go:334] "Generic (PLEG): container finished" podID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" exitCode=0 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.747758 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerDied","Data":"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a"} Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.752538 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1232-account-create-xtcnm"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.775846 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.792893 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.793164 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-log" containerID="cri-o://fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.793219 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-httpd" containerID="cri-o://8445d4a2e1d0c3a546cd9368a1877165d4bf7e98c38114091de01f3e6ca9a695" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.819528 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-5ef3-account-create-xb4mj"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.830810 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config\") pod \"491a98fd-dad0-4515-a217-c01bd4aa741d\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.830914 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49cgx\" (UniqueName: \"kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx\") pod \"491a98fd-dad0-4515-a217-c01bd4aa741d\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.830954 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") pod \"491a98fd-dad0-4515-a217-c01bd4aa741d\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.831012 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret\") pod \"491a98fd-dad0-4515-a217-c01bd4aa741d\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.834607 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-5ef3-account-create-xb4mj"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.838412 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config" (OuterVolumeSpecName: "config") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.857249 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_728d8a1e-9c94-49ce-94f4-491bf34a9b16/ovsdbserver-sb/0.log" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.857592 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.859136 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx" (OuterVolumeSpecName: "kube-api-access-49cgx") pod "491a98fd-dad0-4515-a217-c01bd4aa741d" (UID: "491a98fd-dad0-4515-a217-c01bd4aa741d"). InnerVolumeSpecName "kube-api-access-49cgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.862447 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-nr42l"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.866573 5055 scope.go:117] "RemoveContainer" containerID="d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.872714 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.875245 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8\": container with ID starting with d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8 not found: ID does not exist" containerID="d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.875302 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8"} err="failed to get container status \"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8\": rpc error: code = NotFound desc = could not find container \"d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8\": container with ID starting with d602498d4dd07089c1c212cf834fc53bf0f377b39657719ccd2cf1f0f61cf4e8 not found: ID does not exist" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.875325 5055 scope.go:117] "RemoveContainer" containerID="33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca" Oct 11 07:17:48 crc kubenswrapper[5055]: E1011 07:17:48.879092 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca\": container with ID starting with 33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca not found: ID does not exist" containerID="33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.879138 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca"} err="failed to get container status \"33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca\": rpc error: code = NotFound desc = could not find container \"33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca\": container with ID starting with 33f5af8288f5c5d91a066e3ea5ce9105889c851ba2cc9c6788da707eb3c54eca not found: ID does not exist" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.879177 5055 scope.go:117] "RemoveContainer" containerID="027b40b9cb9c6878b9ee08889d959efee5dacfac1f9095f8704e25123d3fecd3" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.887283 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="rabbitmq" containerID="cri-o://f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61" gracePeriod=604800 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.893078 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.894839 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-nr42l"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.901220 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.901496 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener-log" containerID="cri-o://0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.901845 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener" containerID="cri-o://6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.908308 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "491a98fd-dad0-4515-a217-c01bd4aa741d" (UID: "491a98fd-dad0-4515-a217-c01bd4aa741d"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.912627 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.930395 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.930681 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7fffd4488c-r5vkt" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker-log" containerID="cri-o://5b162a0640d55adbc95b5344fee3401f099e891f722bbcd511184a279d6fc712" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.931074 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7fffd4488c-r5vkt" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker" containerID="cri-o://161a466facbf08e74bf08d457e0525bdf9302acd69cb47d57a131bf40297c7a8" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.932558 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.943614 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.943828 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.943938 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.943961 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.943999 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.944042 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wllkk\" (UniqueName: \"kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.944066 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs\") pod \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\" (UID: \"728d8a1e-9c94-49ce-94f4-491bf34a9b16\") " Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.945193 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.945212 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.945223 5055 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.945231 5055 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.945241 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49cgx\" (UniqueName: \"kubernetes.io/projected/491a98fd-dad0-4515-a217-c01bd4aa741d-kube-api-access-49cgx\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.950121 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config" (OuterVolumeSpecName: "config") pod 
"728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.951671 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts" (OuterVolumeSpecName: "scripts") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.954773 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.967913 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.977174 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.977465 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-log" containerID="cri-o://c1d53b317d7b7a6df70c5f5efdb8793f32b63b9f67d43aa9d015d200b5d8bac5" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.977941 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-httpd" containerID="cri-o://d7f9eac5a0e6f11b97625c4fa2e0f22db3b2a7d4736d79f64658b64c84d7459c" gracePeriod=30 Oct 11 07:17:48 crc kubenswrapper[5055]: I1011 07:17:48.996997 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "491a98fd-dad0-4515-a217-c01bd4aa741d" (UID: "491a98fd-dad0-4515-a217-c01bd4aa741d"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.047152 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "491a98fd-dad0-4515-a217-c01bd4aa741d" (UID: "491a98fd-dad0-4515-a217-c01bd4aa741d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049018 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") pod \"491a98fd-dad0-4515-a217-c01bd4aa741d\" (UID: \"491a98fd-dad0-4515-a217-c01bd4aa741d\") " Oct 11 07:17:49 crc kubenswrapper[5055]: W1011 07:17:49.049481 5055 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/491a98fd-dad0-4515-a217-c01bd4aa741d/volumes/kubernetes.io~secret/combined-ca-bundle Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049494 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "491a98fd-dad0-4515-a217-c01bd4aa741d" (UID: "491a98fd-dad0-4515-a217-c01bd4aa741d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049831 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049854 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049864 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/728d8a1e-9c94-49ce-94f4-491bf34a9b16-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049874 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049886 5055 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/491a98fd-dad0-4515-a217-c01bd4aa741d-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.049894 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.052819 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk" (OuterVolumeSpecName: "kube-api-access-wllkk") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "kube-api-access-wllkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.059343 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.088499 5055 scope.go:117] "RemoveContainer" containerID="0f67a32ae0f476c9f1b1e19bc12c766c0df90a9fc5070dfdfc272f9a4725310f" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.123030 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d54edae-40fd-42a3-b80f-d0633ea306e1" path="/var/lib/kubelet/pods/0d54edae-40fd-42a3-b80f-d0633ea306e1/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.123617 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14f4e1eb-4198-4c36-a920-8161b80d1f9a" path="/var/lib/kubelet/pods/14f4e1eb-4198-4c36-a920-8161b80d1f9a/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.124382 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3038824c-7f07-4822-a8b5-a812d34adb18" path="/var/lib/kubelet/pods/3038824c-7f07-4822-a8b5-a812d34adb18/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.124962 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41210913-fc01-40fc-ae9a-c11bd4e58345" path="/var/lib/kubelet/pods/41210913-fc01-40fc-ae9a-c11bd4e58345/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.127966 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="491a98fd-dad0-4515-a217-c01bd4aa741d" path="/var/lib/kubelet/pods/491a98fd-dad0-4515-a217-c01bd4aa741d/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.128457 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53b11174-ab68-45f3-8933-522549982191" path="/var/lib/kubelet/pods/53b11174-ab68-45f3-8933-522549982191/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.137380 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cd2b520-4d17-4d96-a0d1-a6bdf242f973" path="/var/lib/kubelet/pods/6cd2b520-4d17-4d96-a0d1-a6bdf242f973/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.138220 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7902df61-9a6f-4f11-bb2f-95843e9df7a5" path="/var/lib/kubelet/pods/7902df61-9a6f-4f11-bb2f-95843e9df7a5/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.138690 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b817f811-c9eb-4a75-b166-e379e0747e9f" path="/var/lib/kubelet/pods/b817f811-c9eb-4a75-b166-e379e0747e9f/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.139194 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c13685b1-481e-4a5b-9a66-94c6c9fc0fcd" path="/var/lib/kubelet/pods/c13685b1-481e-4a5b-9a66-94c6c9fc0fcd/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.151263 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c649c0a0-d654-447f-9558-4b0376e1beaf" path="/var/lib/kubelet/pods/c649c0a0-d654-447f-9558-4b0376e1beaf/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.151891 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb84efeb-f131-48fc-b283-c6cb0db40cbe" path="/var/lib/kubelet/pods/cb84efeb-f131-48fc-b283-c6cb0db40cbe/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.152411 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d331a829-dd7e-4cfb-98f4-dc2e797fdfa8" path="/var/lib/kubelet/pods/d331a829-dd7e-4cfb-98f4-dc2e797fdfa8/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.153016 5055 
reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.153071 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7982ac6-3330-40ea-a74b-1f512b13b004" path="/var/lib/kubelet/pods/e7982ac6-3330-40ea-a74b-1f512b13b004/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.154617 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wllkk\" (UniqueName: \"kubernetes.io/projected/728d8a1e-9c94-49ce-94f4-491bf34a9b16-kube-api-access-wllkk\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.158932 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8dcd064-841e-43a0-a210-33ee23f04450" path="/var/lib/kubelet/pods/e8dcd064-841e-43a0-a210-33ee23f04450/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.159706 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f76f706d-47b9-43df-94f1-80767e21f5c9" path="/var/lib/kubelet/pods/f76f706d-47b9-43df-94f1-80767e21f5c9/volumes" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.165620 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" (UID: "e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.192093 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.228064 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.256021 5055 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-conmon-e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bc70ff6_8410_4873_8030_2981e62e73f0.slice/crio-1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a2fd839_7187_4fce_be78_2a911103f8cc.slice/crio-06f9a2ffeba0f853453c090299cdf535d66d84a9728557181ed3eab39c692a54\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51322291_81d0_4cbc_a761_0294a8365fd3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51322291_81d0_4cbc_a761_0294a8365fd3.slice/crio-24aa56eae5db123d03cb69ec134c4277630f172a76f2d2449612cabd90bd71a6\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bc70ff6_8410_4873_8030_2981e62e73f0.slice/crio-conmon-1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-conmon-c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a2fd839_7187_4fce_be78_2a911103f8cc.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cf968bd_0ddb_4647_8c76_8038cb19d053.slice/crio-0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71344487_c426_47fe_85cb_927c97465a5b.slice/crio-conmon-fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cf968bd_0ddb_4647_8c76_8038cb19d053.slice/crio-conmon-0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-conmon-e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2f75249_4d9f_44bf_af62_de6757d2326a.slice/crio-e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7.scope\": RecentStats: unable to find data in memory cache]" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.257031 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.257108 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.257118 5055 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.269095 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "728d8a1e-9c94-49ce-94f4-491bf34a9b16" (UID: "728d8a1e-9c94-49ce-94f4-491bf34a9b16"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.360187 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/728d8a1e-9c94-49ce-94f4-491bf34a9b16-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402391 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402441 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-h7jx7"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402454 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-h7jx7"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402469 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402482 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4c23-account-create-xgh7m"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402494 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-4c23-account-create-xgh7m"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402504 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402515 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402525 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402535 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-4ccpj"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402543 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/novacell0372a-account-delete-68qnz"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402556 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-4ccpj"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402565 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402574 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402584 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-372a-account-create-mzxwb"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402592 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-372a-account-create-mzxwb"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402602 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.402614 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"] Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403754 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="beb2c0e8-4291-454c-aae1-28c186965761" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403788 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="beb2c0e8-4291-454c-aae1-28c186965761" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403809 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="init" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403815 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="init" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403826 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403832 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403847 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="dnsmasq-dns" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403854 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="dnsmasq-dns" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403869 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="ovsdbserver-sb" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403875 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="ovsdbserver-sb" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403885 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="ovsdbserver-nb" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403891 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="ovsdbserver-nb" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403905 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403911 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.403930 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.403936 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404116 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404129 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="beb2c0e8-4291-454c-aae1-28c186965761" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404135 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" containerName="ovn-controller" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404145 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" containerName="dnsmasq-dns" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404158 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="ovsdbserver-sb" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404165 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" containerName="ovsdbserver-nb" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.404174 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" containerName="openstack-network-exporter" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.405928 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.405951 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.405964 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-p2xsl"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.406036 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.407705 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.408063 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="9eb6d469-724e-4dc7-943c-60454db39c60" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.408572 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6c8466445c-jqscq" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-httpd" containerID="cri-o://3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.408648 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6c8466445c-jqscq" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-server" containerID="cri-o://50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.408823 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" containerID="cri-o://2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.408950 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-log" containerID="cri-o://46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.409065 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-66cb796856-btqn8" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api-log" containerID="cri-o://6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.409136 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-66cb796856-btqn8" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api" containerID="cri-o://b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.409189 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" containerID="cri-o://4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.409244 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-api" containerID="cri-o://d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.430485 5055 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/ovn-controller-2l5hh"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.449920 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.463882 5055 scope.go:117] "RemoveContainer" containerID="592f9a3bef2915f38f89bd3def34fbbcb1bece166f2fe29ce499fa62c6995ab3" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.464855 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4px6k\" (UniqueName: \"kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.464917 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.465035 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.512018 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.522835 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.544080 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.567942 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.568008 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4px6k\" (UniqueName: \"kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.568045 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.569022 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities\") pod 
\"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.569239 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.569288 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.569327 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data podName:6baacc00-a270-4662-ba67-aad18287df2c nodeName:}" failed. No retries permitted until 2025-10-11 07:17:53.569313605 +0000 UTC m=+1457.343587412 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data") pod "rabbitmq-cell1-server-0" (UID: "6baacc00-a270-4662-ba67-aad18287df2c") : configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.573435 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.613188 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.633700 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4px6k\" (UniqueName: \"kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k\") pod \"redhat-marketplace-zpjhx\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") " pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.650876 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.661121 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0372a-account-delete-68qnz"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.703838 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.704264 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerName="nova-scheduler-scheduler" containerID="cri-o://cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.737456 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-94mmk"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.747276 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-94mmk"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.758650 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.770489 5055 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d4d96bb9-xs84k"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.775021 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.775222 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerName="nova-cell0-conductor-conductor" containerID="cri-o://b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.781725 5055 generic.go:334] "Generic (PLEG): container finished" podID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerID="3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159" exitCode=0 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.781790 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerDied","Data":"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.782953 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xbzvs"] Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.784950 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 11 07:17:49 crc kubenswrapper[5055]: E1011 07:17:49.785013 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data podName:80597a79-e3fd-41cd-b035-a35494775fcb nodeName:}" failed. No retries permitted until 2025-10-11 07:17:53.784997253 +0000 UTC m=+1457.559271050 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data") pod "rabbitmq-server-0" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb") : configmap "rabbitmq-config-data" not found Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.792357 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.792533 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerName="nova-cell1-conductor-conductor" containerID="cri-o://fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.793518 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi9573-account-delete-4cwkv" event={"ID":"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241","Type":"ContainerStarted","Data":"21b9692b298274e8629ab582e9f1919ba611ba7e5957cd74e6760c445854cef3"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.797054 5055 generic.go:334] "Generic (PLEG): container finished" podID="f99e172e-068d-423a-b797-2467e352a41b" containerID="3dec843ae2891ce55d56d4dae7c2a19d4ac03ffc87ec97d3f77f19af0aaedb44" exitCode=0 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.797151 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementd9e7-account-delete-jsjrs" event={"ID":"f99e172e-068d-423a-b797-2467e352a41b","Type":"ContainerDied","Data":"3dec843ae2891ce55d56d4dae7c2a19d4ac03ffc87ec97d3f77f19af0aaedb44"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.800639 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-xbzvs"] Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.809197 5055 generic.go:334] "Generic (PLEG): container finished" podID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerID="5b162a0640d55adbc95b5344fee3401f099e891f722bbcd511184a279d6fc712" exitCode=143 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.809280 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerDied","Data":"5b162a0640d55adbc95b5344fee3401f099e891f722bbcd511184a279d6fc712"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.812259 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0372a-account-delete-68qnz" event={"ID":"01e71e0c-56f0-4bfc-bd58-501785a5d235","Type":"ContainerStarted","Data":"11cee0c2a6d62bfd38dbf718d4cc936e7d223cab95f64b69aa71174befaef7ca"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.838318 5055 generic.go:334] "Generic (PLEG): container finished" podID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerID="1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c" exitCode=0 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.838400 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerDied","Data":"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.840313 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell14c23-account-delete-sgt68" 
event={"ID":"acf7edb2-ebde-4783-b428-b6ce809be9f9","Type":"ContainerStarted","Data":"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.840358 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell14c23-account-delete-sgt68" event={"ID":"acf7edb2-ebde-4783-b428-b6ce809be9f9","Type":"ContainerStarted","Data":"f2744e412e97c231ce46e0a3d4ff3373bd55515893600dd112e6474cf31e24ae"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.840478 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell14c23-account-delete-sgt68" podUID="acf7edb2-ebde-4783-b428-b6ce809be9f9" containerName="mariadb-account-delete" containerID="cri-o://1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.850505 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="galera" containerID="cri-o://5433269dd3db71b29ff92097b8c5e94a07217bab29089e9289f976033732e76a" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.858070 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5ef3-account-delete-px4qv" event={"ID":"f360ed99-6efd-4281-a18b-7597df5341b6","Type":"ContainerStarted","Data":"58423930f226c4be75a7e72d808276dfe1e3f0699e5542c814dd6b4780f9542d"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.864807 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican1232-account-delete-ct7zg" event={"ID":"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7","Type":"ContainerStarted","Data":"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.864852 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican1232-account-delete-ct7zg" event={"ID":"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7","Type":"ContainerStarted","Data":"d32cfc38507899643fce06fd539b392ff57507ec814b18d2c61625782ef693c4"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.864973 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican1232-account-delete-ct7zg" podUID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" containerName="mariadb-account-delete" containerID="cri-o://21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1" gracePeriod=30 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.873617 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell14c23-account-delete-sgt68" podStartSLOduration=3.873597853 podStartE2EDuration="3.873597853s" podCreationTimestamp="2025-10-11 07:17:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:17:49.862237959 +0000 UTC m=+1453.636511776" watchObservedRunningTime="2025-10-11 07:17:49.873597853 +0000 UTC m=+1453.647871660" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.880404 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.881052 5055 scope.go:117] "RemoveContainer" containerID="717aceea1665af2d47ebfdceb4d8f53a94f23eb3af20babe5546bf58c47b4764" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.920970 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.938159 5055 generic.go:334] "Generic (PLEG): container finished" podID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerID="c1d53b317d7b7a6df70c5f5efdb8793f32b63b9f67d43aa9d015d200b5d8bac5" exitCode=143 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.938235 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerDied","Data":"c1d53b317d7b7a6df70c5f5efdb8793f32b63b9f67d43aa9d015d200b5d8bac5"} Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.962291 5055 generic.go:334] "Generic (PLEG): container finished" podID="056ae819-c243-4aa7-8214-1432f198dcab" containerID="2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9" exitCode=143 Oct 11 07:17:49 crc kubenswrapper[5055]: I1011 07:17:49.962347 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerDied","Data":"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.026945 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_728d8a1e-9c94-49ce-94f4-491bf34a9b16/ovsdbserver-sb/0.log" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.027011 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"728d8a1e-9c94-49ce-94f4-491bf34a9b16","Type":"ContainerDied","Data":"54dafb3189f4c266b9a8f1df83b35d9ddb0496452f4a5ca42692c7f83479abee"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.027099 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.091299 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican1232-account-delete-ct7zg" podStartSLOduration=4.091275858 podStartE2EDuration="4.091275858s" podCreationTimestamp="2025-10-11 07:17:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 07:17:49.896429145 +0000 UTC m=+1453.670702952" watchObservedRunningTime="2025-10-11 07:17:50.091275858 +0000 UTC m=+1453.865549665" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.091793 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.102493 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.110244 5055 scope.go:117] "RemoveContainer" containerID="4150f4280b66ed07a18507fa2549cb34d90ba3d5f369125336c5e32bea64bc9a" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114099 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b" exitCode=0 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114130 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7" exitCode=0 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114138 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f" exitCode=0 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114171 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114197 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.114210 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.139436 5055 generic.go:334] "Generic (PLEG): container finished" podID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerID="1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad" exitCode=0 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.139854 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerDied","Data":"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.142520 5055 generic.go:334] "Generic (PLEG): container finished" podID="71344487-c426-47fe-85cb-927c97465a5b" 
containerID="fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4" exitCode=143 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.142574 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerDied","Data":"fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.143875 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0232-account-delete-6nzbr" event={"ID":"c647ccff-95d1-467a-9fcd-4b5110772361","Type":"ContainerStarted","Data":"50479c196e7327c65aee41754b6af0da59a3e387d101a5c5bf82f4079ddb1d67"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.163066 5055 generic.go:334] "Generic (PLEG): container finished" podID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerID="46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce" exitCode=143 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.163180 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerDied","Data":"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.168265 5055 generic.go:334] "Generic (PLEG): container finished" podID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerID="0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4" exitCode=143 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.168351 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerDied","Data":"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4"} Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.181901 5055 generic.go:334] "Generic (PLEG): container finished" podID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerID="6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c" exitCode=143 Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.181948 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerDied","Data":"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c"} Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.394437 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.394963 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.395447 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container 
is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.395482 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.397277 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.399207 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.400347 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:50 crc kubenswrapper[5055]: E1011 07:17:50.400383 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.587504 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.639976 5055 scope.go:117] "RemoveContainer" containerID="0461a09292ab9015ca5bd80c2894512da59d2f14cb2c366a1cfd852dcb6c1234" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.727834 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mglh\" (UniqueName: \"kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh\") pod \"f99e172e-068d-423a-b797-2467e352a41b\" (UID: \"f99e172e-068d-423a-b797-2467e352a41b\") " Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.734003 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh" (OuterVolumeSpecName: "kube-api-access-9mglh") pod "f99e172e-068d-423a-b797-2467e352a41b" (UID: "f99e172e-068d-423a-b797-2467e352a41b"). InnerVolumeSpecName "kube-api-access-9mglh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.829797 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mglh\" (UniqueName: \"kubernetes.io/projected/f99e172e-068d-423a-b797-2467e352a41b-kube-api-access-9mglh\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.874617 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.896549 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell14c23-account-delete-sgt68" Oct 11 07:17:50 crc kubenswrapper[5055]: I1011 07:17:50.975531 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0232-account-delete-6nzbr" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.028083 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1103eafd-fb3f-472a-8a90-2733ef956fe2" path="/var/lib/kubelet/pods/1103eafd-fb3f-472a-8a90-2733ef956fe2/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.028872 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="472e70e9-3fa9-4f4d-a1ae-646e04c485f5" path="/var/lib/kubelet/pods/472e70e9-3fa9-4f4d-a1ae-646e04c485f5/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.029495 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51322291-81d0-4cbc-a761-0294a8365fd3" path="/var/lib/kubelet/pods/51322291-81d0-4cbc-a761-0294a8365fd3/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.030113 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59dafca2-dcd1-430c-8235-33df97f6cb43" path="/var/lib/kubelet/pods/59dafca2-dcd1-430c-8235-33df97f6cb43/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.031316 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a2fd839-7187-4fce-be78-2a911103f8cc" path="/var/lib/kubelet/pods/5a2fd839-7187-4fce-be78-2a911103f8cc/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.031985 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="728d8a1e-9c94-49ce-94f4-491bf34a9b16" path="/var/lib/kubelet/pods/728d8a1e-9c94-49ce-94f4-491bf34a9b16/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.035378 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2bnj\" (UniqueName: \"kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj\") pod \"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7\" (UID: \"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.035561 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4ptm\" (UniqueName: \"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm\") pod \"acf7edb2-ebde-4783-b428-b6ce809be9f9\" (UID: \"acf7edb2-ebde-4783-b428-b6ce809be9f9\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.038802 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75e6c9e3-4bce-4031-94a5-71396f262b51" path="/var/lib/kubelet/pods/75e6c9e3-4bce-4031-94a5-71396f262b51/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.039206 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm" (OuterVolumeSpecName: "kube-api-access-l4ptm") pod "acf7edb2-ebde-4783-b428-b6ce809be9f9" (UID: "acf7edb2-ebde-4783-b428-b6ce809be9f9"). InnerVolumeSpecName "kube-api-access-l4ptm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.039632 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="beb2c0e8-4291-454c-aae1-28c186965761" path="/var/lib/kubelet/pods/beb2c0e8-4291-454c-aae1-28c186965761/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.040442 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0129105-ee1c-480f-a857-48e5cd08efd7" path="/var/lib/kubelet/pods/d0129105-ee1c-480f-a857-48e5cd08efd7/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.040928 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj" (OuterVolumeSpecName: "kube-api-access-f2bnj") pod "84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" (UID: "84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7"). InnerVolumeSpecName "kube-api-access-f2bnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.054040 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac" path="/var/lib/kubelet/pods/e3ffe4f1-6dc4-4b98-90ef-be3de8ebafac/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.054642 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d" path="/var/lib/kubelet/pods/eb78ddb2-aaa5-4b1b-bf9c-0b38775f628d/volumes" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.059389 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.070321 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.139227 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtnhh\" (UniqueName: \"kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh\") pod \"c647ccff-95d1-467a-9fcd-4b5110772361\" (UID: \"c647ccff-95d1-467a-9fcd-4b5110772361\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.139991 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4ptm\" (UniqueName: \"kubernetes.io/projected/acf7edb2-ebde-4783-b428-b6ce809be9f9-kube-api-access-l4ptm\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.140018 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2bnj\" (UniqueName: \"kubernetes.io/projected/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7-kube-api-access-f2bnj\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.151799 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh" (OuterVolumeSpecName: "kube-api-access-xtnhh") pod "c647ccff-95d1-467a-9fcd-4b5110772361" (UID: "c647ccff-95d1-467a-9fcd-4b5110772361"). InnerVolumeSpecName "kube-api-access-xtnhh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.163491 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.164161 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="sg-core" containerID="cri-o://919ef65eeeea598c7b3f2a94cda4ed6f77049d1b185e1b4613c3eaa5ae3f9970" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.164282 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="proxy-httpd" containerID="cri-o://c87bdbf1b6954a66227351f91bbd585daf3ee9facdce59279624e5a7379654be" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.164334 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-notification-agent" containerID="cri-o://bbcfbd58af515683c0fa41f7883c1636fcf3f11322b07edf17837f0776642867" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.163836 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-central-agent" containerID="cri-o://e7de0a43fb54dfe049156667e0b998705b9e78b7bff9600f863738f69bbff3b9" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.172165 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.172369 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerName="kube-state-metrics" containerID="cri-o://2e4ad5facb5f08bffeaad25b16cd84fad4909f52d97b67679d95970d047e100f" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242430 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l66tk\" (UniqueName: \"kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk\") pod \"7cf968bd-0ddb-4647-8c76-8038cb19d053\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242475 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data\") pod \"7cf968bd-0ddb-4647-8c76-8038cb19d053\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242560 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wlzr\" (UniqueName: \"kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr\") pod \"9eb6d469-724e-4dc7-943c-60454db39c60\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242609 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data\") pod \"9eb6d469-724e-4dc7-943c-60454db39c60\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " Oct 11 
07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242665 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle\") pod \"7cf968bd-0ddb-4647-8c76-8038cb19d053\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242700 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom\") pod \"7cf968bd-0ddb-4647-8c76-8038cb19d053\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242745 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs\") pod \"9eb6d469-724e-4dc7-943c-60454db39c60\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242901 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs\") pod \"7cf968bd-0ddb-4647-8c76-8038cb19d053\" (UID: \"7cf968bd-0ddb-4647-8c76-8038cb19d053\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242948 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle\") pod \"9eb6d469-724e-4dc7-943c-60454db39c60\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.242987 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs\") pod \"9eb6d469-724e-4dc7-943c-60454db39c60\" (UID: \"9eb6d469-724e-4dc7-943c-60454db39c60\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.243508 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtnhh\" (UniqueName: \"kubernetes.io/projected/c647ccff-95d1-467a-9fcd-4b5110772361-kube-api-access-xtnhh\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.249180 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk" (OuterVolumeSpecName: "kube-api-access-l66tk") pod "7cf968bd-0ddb-4647-8c76-8038cb19d053" (UID: "7cf968bd-0ddb-4647-8c76-8038cb19d053"). InnerVolumeSpecName "kube-api-access-l66tk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.250483 5055 generic.go:334] "Generic (PLEG): container finished" podID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerID="5433269dd3db71b29ff92097b8c5e94a07217bab29089e9289f976033732e76a" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.250592 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerDied","Data":"5433269dd3db71b29ff92097b8c5e94a07217bab29089e9289f976033732e76a"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.253939 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs" (OuterVolumeSpecName: "logs") pod "7cf968bd-0ddb-4647-8c76-8038cb19d053" (UID: "7cf968bd-0ddb-4647-8c76-8038cb19d053"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.267349 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "7cf968bd-0ddb-4647-8c76-8038cb19d053" (UID: "7cf968bd-0ddb-4647-8c76-8038cb19d053"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.268397 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementd9e7-account-delete-jsjrs" event={"ID":"f99e172e-068d-423a-b797-2467e352a41b","Type":"ContainerDied","Data":"8511f64421163b3753c44062369411c09fc6521d3801027ea6498709bdd2e7a7"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.268466 5055 scope.go:117] "RemoveContainer" containerID="3dec843ae2891ce55d56d4dae7c2a19d4ac03ffc87ec97d3f77f19af0aaedb44" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.269446 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementd9e7-account-delete-jsjrs" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.272369 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.288747 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.289017 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="1bebbd2e-a315-493d-820b-69e8dc749ee1" containerName="memcached" containerID="cri-o://10d71784edd3f6ec2fbce87c08a6c3ea956ccb7e0b266974fc673f0bc6f0afd2" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.289063 5055 generic.go:334] "Generic (PLEG): container finished" podID="acf7edb2-ebde-4783-b428-b6ce809be9f9" containerID="1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7" exitCode=1 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.289175 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell14c23-account-delete-sgt68" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.289548 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell14c23-account-delete-sgt68" event={"ID":"acf7edb2-ebde-4783-b428-b6ce809be9f9","Type":"ContainerDied","Data":"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.289584 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell14c23-account-delete-sgt68" event={"ID":"acf7edb2-ebde-4783-b428-b6ce809be9f9","Type":"ContainerDied","Data":"f2744e412e97c231ce46e0a3d4ff3373bd55515893600dd112e6474cf31e24ae"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.303314 5055 generic.go:334] "Generic (PLEG): container finished" podID="c647ccff-95d1-467a-9fcd-4b5110772361" containerID="9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.307947 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0232-account-delete-6nzbr" event={"ID":"c647ccff-95d1-467a-9fcd-4b5110772361","Type":"ContainerDied","Data":"50479c196e7327c65aee41754b6af0da59a3e387d101a5c5bf82f4079ddb1d67"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.308038 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0232-account-delete-6nzbr" event={"ID":"c647ccff-95d1-467a-9fcd-4b5110772361","Type":"ContainerDied","Data":"9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.316708 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0232-account-delete-6nzbr" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.317580 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr" (OuterVolumeSpecName: "kube-api-access-8wlzr") pod "9eb6d469-724e-4dc7-943c-60454db39c60" (UID: "9eb6d469-724e-4dc7-943c-60454db39c60"). InnerVolumeSpecName "kube-api-access-8wlzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.349053 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.349087 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7cf968bd-0ddb-4647-8c76-8038cb19d053-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.349097 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l66tk\" (UniqueName: \"kubernetes.io/projected/7cf968bd-0ddb-4647-8c76-8038cb19d053-kube-api-access-l66tk\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.349108 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wlzr\" (UniqueName: \"kubernetes.io/projected/9eb6d469-724e-4dc7-943c-60454db39c60-kube-api-access-8wlzr\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.356571 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-gwmkb"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.427560 5055 generic.go:334] "Generic (PLEG): container finished" podID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerID="82a484cebdaeca44b956dd6026c1a5e65bf7f6fa2ec01d1293e086f33d6250f6" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.432633 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-55cv5"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.432697 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerDied","Data":"82a484cebdaeca44b956dd6026c1a5e65bf7f6fa2ec01d1293e086f33d6250f6"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.451246 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.451310 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.451354 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c79fj\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.451392 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.451543 5055 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.452775 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.452807 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.452853 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd\") pod \"5f76ee4c-bc76-492a-898d-def8ec69e291\" (UID: \"5f76ee4c-bc76-492a-898d-def8ec69e291\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.453200 5055 generic.go:334] "Generic (PLEG): container finished" podID="4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" containerID="00c617aa4b929d980bf4005136dd8cb35aa22f6579b8d52e5d42af62b008cfc3" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.453445 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi9573-account-delete-4cwkv" event={"ID":"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241","Type":"ContainerDied","Data":"00c617aa4b929d980bf4005136dd8cb35aa22f6579b8d52e5d42af62b008cfc3"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.454186 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.454996 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data" (OuterVolumeSpecName: "config-data") pod "9eb6d469-724e-4dc7-943c-60454db39c60" (UID: "9eb6d469-724e-4dc7-943c-60454db39c60"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.465976 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-55cv5"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.472108 5055 generic.go:334] "Generic (PLEG): container finished" podID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerID="50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.472346 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerDied","Data":"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.472394 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c8466445c-jqscq" event={"ID":"5f76ee4c-bc76-492a-898d-def8ec69e291","Type":"ContainerDied","Data":"85f4eca87ee2e65bc1774d422d6ae86c472d1e300f216c966d8229e39cf0c48c"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.472503 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6c8466445c-jqscq" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.480634 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-gwmkb"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.481308 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.484100 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.168:8776/healthcheck\": read tcp 10.217.0.2:56804->10.217.0.168:8776: read: connection reset by peer" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.487074 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj" (OuterVolumeSpecName: "kube-api-access-c79fj") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "kube-api-access-c79fj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.487534 5055 generic.go:334] "Generic (PLEG): container finished" podID="9eb6d469-724e-4dc7-943c-60454db39c60" containerID="d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.487666 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9eb6d469-724e-4dc7-943c-60454db39c60","Type":"ContainerDied","Data":"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.487908 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9eb6d469-724e-4dc7-943c-60454db39c60","Type":"ContainerDied","Data":"f6972eb41950d9e30be213715d7741d5bc4508bbc234e6f18dd44c5f48eba493"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.487709 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.490144 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7cf968bd-0ddb-4647-8c76-8038cb19d053" (UID: "7cf968bd-0ddb-4647-8c76-8038cb19d053"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.501342 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.501732 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-867958d55b-prp6q" podUID="5108504f-f2dd-4f43-8d7a-f630e055d661" containerName="keystone-api" containerID="cri-o://e8e6bf3ea958d93967bb08adda0bad8a6b27f850a70be0cd9489070df6fd455e" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.517956 5055 scope.go:117] "RemoveContainer" containerID="1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.521125 5055 generic.go:334] "Generic (PLEG): container finished" podID="01e71e0c-56f0-4bfc-bd58-501785a5d235" containerID="5b7de1ee0c33bc352a3efec430407c4b7fc6aa49d8e39cef8efbd11931ec62fb" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.521264 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0372a-account-delete-68qnz" event={"ID":"01e71e0c-56f0-4bfc-bd58-501785a5d235","Type":"ContainerDied","Data":"5b7de1ee0c33bc352a3efec430407c4b7fc6aa49d8e39cef8efbd11931ec62fb"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.525708 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.544150 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.544293 5055 generic.go:334] "Generic (PLEG): container finished" podID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerID="6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.544548 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerDied","Data":"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.544663 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68ddb9fb98-g6thb" event={"ID":"7cf968bd-0ddb-4647-8c76-8038cb19d053","Type":"ContainerDied","Data":"64228fa0248689ac70aec2777d6d69b8da2c68388d2ccbd55401815b2f00144b"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.549317 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555547 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555757 5055 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555835 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555891 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5f76ee4c-bc76-492a-898d-def8ec69e291-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555940 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c79fj\" (UniqueName: \"kubernetes.io/projected/5f76ee4c-bc76-492a-898d-def8ec69e291-kube-api-access-c79fj\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555993 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.556006 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.555707 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "9eb6d469-724e-4dc7-943c-60454db39c60" (UID: "9eb6d469-724e-4dc7-943c-60454db39c60"). InnerVolumeSpecName "nova-novncproxy-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.558708 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "9eb6d469-724e-4dc7-943c-60454db39c60" (UID: "9eb6d469-724e-4dc7-943c-60454db39c60"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.558893 5055 generic.go:334] "Generic (PLEG): container finished" podID="f360ed99-6efd-4281-a18b-7597df5341b6" containerID="9005a9d40210899cb8c06dcf5d2cc8762eda1239eaeb43568f31ef7af56a71cc" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.559006 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5ef3-account-delete-px4qv" event={"ID":"f360ed99-6efd-4281-a18b-7597df5341b6","Type":"ContainerDied","Data":"9005a9d40210899cb8c06dcf5d2cc8762eda1239eaeb43568f31ef7af56a71cc"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.563266 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9eb6d469-724e-4dc7-943c-60454db39c60" (UID: "9eb6d469-724e-4dc7-943c-60454db39c60"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.564983 5055 generic.go:334] "Generic (PLEG): container finished" podID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" containerID="21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1" exitCode=0 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.565026 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican1232-account-delete-ct7zg" event={"ID":"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7","Type":"ContainerDied","Data":"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.565049 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican1232-account-delete-ct7zg" event={"ID":"84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7","Type":"ContainerDied","Data":"d32cfc38507899643fce06fd539b392ff57507ec814b18d2c61625782ef693c4"} Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.565093 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican1232-account-delete-ct7zg" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.567219 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-t794s"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.581297 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-fe2c-account-create-9pdsz"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.591790 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-fe2c-account-create-9pdsz"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.596069 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-t794s"] Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.632234 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.634004 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.635205 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.635251 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerName="nova-cell1-conductor-conductor" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.643913 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data" (OuterVolumeSpecName: "config-data") pod "7cf968bd-0ddb-4647-8c76-8038cb19d053" (UID: "7cf968bd-0ddb-4647-8c76-8038cb19d053"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665473 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665592 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665620 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665639 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665700 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665801 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665823 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.665865 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s5nf\" (UniqueName: \"kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.670728 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.676396 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets\") pod \"01de7a83-4a08-494a-a3b9-774e6787e30f\" (UID: \"01de7a83-4a08-494a-a3b9-774e6787e30f\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.677103 5055 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.677115 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.677126 5055 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9eb6d469-724e-4dc7-943c-60454db39c60-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.677136 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cf968bd-0ddb-4647-8c76-8038cb19d053-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.677404 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.681614 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.684149 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf" (OuterVolumeSpecName: "kube-api-access-5s5nf") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "kube-api-access-5s5nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.686546 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.688881 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.699672 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets" (OuterVolumeSpecName: "secrets") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.711279 5055 scope.go:117] "RemoveContainer" containerID="1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.711293 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.711405 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.711684 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7\": container with ID starting with 1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7 not found: ID does not exist" containerID="1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.711716 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7"} err="failed to get container status \"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7\": rpc error: code = NotFound desc = could not find container \"1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7\": container with ID starting with 1b5cf5d9b22890e585b1b240aa56900b010b846adfc5872b84eed4d8a650ebe7 not found: ID does not exist" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.711735 5055 scope.go:117] "RemoveContainer" containerID="9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.715888 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.729960 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementd9e7-account-delete-jsjrs"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.736606 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.742587 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell14c23-account-delete-sgt68"] Oct 11 07:17:51 crc kubenswrapper[5055]: W1011 07:17:51.746001 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4fdc6f8c_1c91_45d3_80bc_d07c5953a385.slice/crio-bd430b9b7cb485334722137314bd920920e251188a54119a9ef666b1d4dba06d WatchSource:0}: Error finding container bd430b9b7cb485334722137314bd920920e251188a54119a9ef666b1d4dba06d: Status 404 returned error can't find the container with id bd430b9b7cb485334722137314bd920920e251188a54119a9ef666b1d4dba06d Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.751326 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-stvkx"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.757435 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.763262 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-stvkx"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.766016 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-9573-account-create-4kl4d"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779594 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-default\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779624 5055 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-operator-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779634 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s5nf\" (UniqueName: \"kubernetes.io/projected/01de7a83-4a08-494a-a3b9-774e6787e30f-kube-api-access-5s5nf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779643 5055 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779662 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779671 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779681 5055 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01de7a83-4a08-494a-a3b9-774e6787e30f-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779689 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.779697 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01de7a83-4a08-494a-a3b9-774e6787e30f-config-data-generated\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.782472 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-9573-account-create-4kl4d"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.807224 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.822462 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.828732 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder0232-account-delete-6nzbr"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 
07:17:51.833656 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.836912 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.839879 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican1232-account-delete-ct7zg"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.844331 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.844476 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.870312 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.872533 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.873856 5055 scope.go:117] "RemoveContainer" containerID="9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.874258 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: E1011 07:17:51.874310 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847\": container with ID starting with 9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847 not found: ID does not exist" containerID="9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.874371 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847"} err="failed to get container status \"9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847\": rpc error: code = NotFound desc = could not find container \"9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847\": container with ID starting with 9271859b9feb8e20c8fadf46541a0ab61811884881985a15da9c5b83f0372847 not found: ID does not exist" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.874399 5055 scope.go:117] "RemoveContainer" containerID="50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.880897 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881032 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881110 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881149 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881213 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881242 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qvcv\" (UniqueName: \"kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881262 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs\") pod \"c7e77464-d0e5-4a9b-97a1-557cac810654\" (UID: \"c7e77464-d0e5-4a9b-97a1-557cac810654\") " Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881655 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881682 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881691 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.881842 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data" (OuterVolumeSpecName: "config-data") pod "5f76ee4c-bc76-492a-898d-def8ec69e291" (UID: "5f76ee4c-bc76-492a-898d-def8ec69e291"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.888707 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs" (OuterVolumeSpecName: "logs") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.895825 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.897721 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts" (OuterVolumeSpecName: "scripts") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.900603 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-68ddb9fb98-g6thb"] Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.910944 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv" (OuterVolumeSpecName: "kube-api-access-4qvcv") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "kube-api-access-4qvcv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.914918 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="galera" containerID="cri-o://5df485ac2f70f05a177e6e141ec2dc723c3f8a0fa7ae8342e45af4f8db485f9b" gracePeriod=30 Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.974176 5055 scope.go:117] "RemoveContainer" containerID="3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.983009 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qvcv\" (UniqueName: \"kubernetes.io/projected/c7e77464-d0e5-4a9b-97a1-557cac810654-kube-api-access-4qvcv\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.983041 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.983053 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f76ee4c-bc76-492a-898d-def8ec69e291-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.983061 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7e77464-d0e5-4a9b-97a1-557cac810654-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:51 crc kubenswrapper[5055]: I1011 07:17:51.995231 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.196:3000/\": dial tcp 10.217.0.196:3000: connect: connection refused" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.067952 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data" (OuterVolumeSpecName: "config-data") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.089098 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.098515 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0372a-account-delete-68qnz" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.101927 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "01de7a83-4a08-494a-a3b9-774e6787e30f" (UID: "01de7a83-4a08-494a-a3b9-774e6787e30f"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.113170 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.174251 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.174300 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-6c8466445c-jqscq"] Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.192934 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fpdr\" (UniqueName: \"kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr\") pod \"01e71e0c-56f0-4bfc-bd58-501785a5d235\" (UID: \"01e71e0c-56f0-4bfc-bd58-501785a5d235\") " Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.193365 5055 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01de7a83-4a08-494a-a3b9-774e6787e30f-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.193385 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.197033 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.216831 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr" (OuterVolumeSpecName: "kube-api-access-8fpdr") pod "01e71e0c-56f0-4bfc-bd58-501785a5d235" (UID: "01e71e0c-56f0-4bfc-bd58-501785a5d235"). InnerVolumeSpecName "kube-api-access-8fpdr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.264487 5055 scope.go:117] "RemoveContainer" containerID="50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1" Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.265020 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1\": container with ID starting with 50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1 not found: ID does not exist" containerID="50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.265049 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1"} err="failed to get container status \"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1\": rpc error: code = NotFound desc = could not find container \"50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1\": container with ID starting with 50f25ffb536fa0772a202d0f5854aa815a2d08286c15c5285861895cfbdcbfc1 not found: ID does not exist" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.265103 5055 scope.go:117] "RemoveContainer" containerID="3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159" Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.265453 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159\": container with ID starting with 3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159 not found: ID does not exist" containerID="3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.265471 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159"} err="failed to get container status \"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159\": rpc error: code = NotFound desc = could not find container \"3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159\": container with ID starting with 3f492337a36f4101c61292981fed7289c528328ea9ced5f8117499fb88135159 not found: ID does not exist" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.265483 5055 scope.go:117] "RemoveContainer" containerID="d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68" Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.295311 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.296552 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.296578 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fpdr\" (UniqueName: 
\"kubernetes.io/projected/01e71e0c-56f0-4bfc-bd58-501785a5d235-kube-api-access-8fpdr\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.308743 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.325041 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Oct 11 07:17:52 crc kubenswrapper[5055]: E1011 07:17:52.325105 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.326989 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c7e77464-d0e5-4a9b-97a1-557cac810654" (UID: "c7e77464-d0e5-4a9b-97a1-557cac810654"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.404974 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7e77464-d0e5-4a9b-97a1-557cac810654-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615184 5055 generic.go:334] "Generic (PLEG): container finished" podID="3cc22641-a524-415a-8377-580664ed5d90" containerID="c87bdbf1b6954a66227351f91bbd585daf3ee9facdce59279624e5a7379654be" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615213 5055 generic.go:334] "Generic (PLEG): container finished" podID="3cc22641-a524-415a-8377-580664ed5d90" containerID="919ef65eeeea598c7b3f2a94cda4ed6f77049d1b185e1b4613c3eaa5ae3f9970" exitCode=2 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615221 5055 generic.go:334] "Generic (PLEG): container finished" podID="3cc22641-a524-415a-8377-580664ed5d90" containerID="bbcfbd58af515683c0fa41f7883c1636fcf3f11322b07edf17837f0776642867" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615227 5055 generic.go:334] "Generic (PLEG): container finished" podID="3cc22641-a524-415a-8377-580664ed5d90" containerID="e7de0a43fb54dfe049156667e0b998705b9e78b7bff9600f863738f69bbff3b9" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615261 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerDied","Data":"c87bdbf1b6954a66227351f91bbd585daf3ee9facdce59279624e5a7379654be"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615337 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerDied","Data":"919ef65eeeea598c7b3f2a94cda4ed6f77049d1b185e1b4613c3eaa5ae3f9970"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615356 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerDied","Data":"bbcfbd58af515683c0fa41f7883c1636fcf3f11322b07edf17837f0776642867"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.615369 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerDied","Data":"e7de0a43fb54dfe049156667e0b998705b9e78b7bff9600f863738f69bbff3b9"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.618517 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0372a-account-delete-68qnz" event={"ID":"01e71e0c-56f0-4bfc-bd58-501785a5d235","Type":"ContainerDied","Data":"11cee0c2a6d62bfd38dbf718d4cc936e7d223cab95f64b69aa71174befaef7ca"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.618551 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0372a-account-delete-68qnz" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.626317 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01de7a83-4a08-494a-a3b9-774e6787e30f","Type":"ContainerDied","Data":"9e0288c284168c302b026f8fa535bc1a6b3b6d591b4180866429e5afe6ef8460"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.626342 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.628818 5055 generic.go:334] "Generic (PLEG): container finished" podID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerID="fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.628861 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6a52d356-2098-4a61-be67-5519a2d2b78f","Type":"ContainerDied","Data":"fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.630004 5055 generic.go:334] "Generic (PLEG): container finished" podID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerID="cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.630046 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e2e0c881-0205-4317-ba03-cfb76f0f69e6","Type":"ContainerDied","Data":"cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.636411 5055 generic.go:334] "Generic (PLEG): container finished" podID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerID="ce9f9d6bb4b08df1523515f638c8c0fd3d6d8d86faabc0808907511f9e7af1e3" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.636457 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerDied","Data":"ce9f9d6bb4b08df1523515f638c8c0fd3d6d8d86faabc0808907511f9e7af1e3"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.639346 5055 generic.go:334] "Generic (PLEG): container finished" 
podID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerID="2e4ad5facb5f08bffeaad25b16cd84fad4909f52d97b67679d95970d047e100f" exitCode=2 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.639411 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af2ab03e-3011-4d6c-bbd5-ac5453d7785d","Type":"ContainerDied","Data":"2e4ad5facb5f08bffeaad25b16cd84fad4909f52d97b67679d95970d047e100f"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.642839 5055 generic.go:334] "Generic (PLEG): container finished" podID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerID="af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.642947 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerDied","Data":"af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.643131 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerStarted","Data":"bd430b9b7cb485334722137314bd920920e251188a54119a9ef666b1d4dba06d"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.645823 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f95b955d-vptlr" event={"ID":"c7e77464-d0e5-4a9b-97a1-557cac810654","Type":"ContainerDied","Data":"6d82590db7bcb61c86326bd78a7086f9f1282ceb3a193f4a5f2b963338ffbf54"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.645896 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9f95b955d-vptlr" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.685941 5055 generic.go:334] "Generic (PLEG): container finished" podID="71344487-c426-47fe-85cb-927c97465a5b" containerID="8445d4a2e1d0c3a546cd9368a1877165d4bf7e98c38114091de01f3e6ca9a695" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.686023 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerDied","Data":"8445d4a2e1d0c3a546cd9368a1877165d4bf7e98c38114091de01f3e6ca9a695"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.699952 5055 generic.go:334] "Generic (PLEG): container finished" podID="1bebbd2e-a315-493d-820b-69e8dc749ee1" containerID="10d71784edd3f6ec2fbce87c08a6c3ea956ccb7e0b266974fc673f0bc6f0afd2" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.700016 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1bebbd2e-a315-493d-820b-69e8dc749ee1","Type":"ContainerDied","Data":"10d71784edd3f6ec2fbce87c08a6c3ea956ccb7e0b266974fc673f0bc6f0afd2"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.700043 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"1bebbd2e-a315-493d-820b-69e8dc749ee1","Type":"ContainerDied","Data":"ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.700055 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac9047c1f2ad34b0364dff7b138c6c13158775b692eee5cdd0bb757560a5f6c8" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.706540 5055 generic.go:334] 
"Generic (PLEG): container finished" podID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerID="161a466facbf08e74bf08d457e0525bdf9302acd69cb47d57a131bf40297c7a8" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.706595 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerDied","Data":"161a466facbf08e74bf08d457e0525bdf9302acd69cb47d57a131bf40297c7a8"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.712056 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5ef3-account-delete-px4qv" event={"ID":"f360ed99-6efd-4281-a18b-7597df5341b6","Type":"ContainerDied","Data":"58423930f226c4be75a7e72d808276dfe1e3f0699e5542c814dd6b4780f9542d"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.712094 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58423930f226c4be75a7e72d808276dfe1e3f0699e5542c814dd6b4780f9542d" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.716912 5055 generic.go:334] "Generic (PLEG): container finished" podID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerID="d7f9eac5a0e6f11b97625c4fa2e0f22db3b2a7d4736d79f64658b64c84d7459c" exitCode=0 Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.717014 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerDied","Data":"d7f9eac5a0e6f11b97625c4fa2e0f22db3b2a7d4736d79f64658b64c84d7459c"} Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.869083 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:42948->10.217.0.201:8775: read: connection reset by peer" Oct 11 07:17:52 crc kubenswrapper[5055]: I1011 07:17:52.869100 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.201:8775/\": read tcp 10.217.0.2:42960->10.217.0.201:8775: read: connection reset by peer" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.014092 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dcbfb53-b641-4aa7-91e3-43225acb15f4" path="/var/lib/kubelet/pods/4dcbfb53-b641-4aa7-91e3-43225acb15f4/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.014683 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" path="/var/lib/kubelet/pods/5f76ee4c-bc76-492a-898d-def8ec69e291/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.015282 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6899c71e-b2d8-45f6-bc9b-e37542b5f544" path="/var/lib/kubelet/pods/6899c71e-b2d8-45f6-bc9b-e37542b5f544/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.015801 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" path="/var/lib/kubelet/pods/7cf968bd-0ddb-4647-8c76-8038cb19d053/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.018989 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" 
path="/var/lib/kubelet/pods/84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.019460 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4f4d49-efc9-44ef-a120-b8eda868d552" path="/var/lib/kubelet/pods/9d4f4d49-efc9-44ef-a120-b8eda868d552/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.019920 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9eb6d469-724e-4dc7-943c-60454db39c60" path="/var/lib/kubelet/pods/9eb6d469-724e-4dc7-943c-60454db39c60/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.020941 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6501c99-b98e-4311-a419-5ed815080287" path="/var/lib/kubelet/pods/a6501c99-b98e-4311-a419-5ed815080287/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.021381 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acf7edb2-ebde-4783-b428-b6ce809be9f9" path="/var/lib/kubelet/pods/acf7edb2-ebde-4783-b428-b6ce809be9f9/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.021873 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad9b1fb9-5569-4981-ba98-1c45da1ba757" path="/var/lib/kubelet/pods/ad9b1fb9-5569-4981-ba98-1c45da1ba757/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.022307 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c647ccff-95d1-467a-9fcd-4b5110772361" path="/var/lib/kubelet/pods/c647ccff-95d1-467a-9fcd-4b5110772361/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.023669 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8184ab7-72ce-4337-9e27-a04849f1e796" path="/var/lib/kubelet/pods/e8184ab7-72ce-4337-9e27-a04849f1e796/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.024208 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f99e172e-068d-423a-b797-2467e352a41b" path="/var/lib/kubelet/pods/f99e172e-068d-423a-b797-2467e352a41b/volumes" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.065108 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.083083 5055 scope.go:117] "RemoveContainer" containerID="d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.088140 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68\": container with ID starting with d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68 not found: ID does not exist" containerID="d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.088222 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68"} err="failed to get container status \"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68\": rpc error: code = NotFound desc = could not find container \"d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68\": container with ID starting with d21aed6aaefbc9580be9ca7f2e6eb8ebd1a3f5f50e101c4c845a2f3a24925d68 not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.088275 5055 scope.go:117] "RemoveContainer" containerID="6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.113912 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance5ef3-account-delete-px4qv" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.117734 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config\") pod \"1bebbd2e-a315-493d-820b-69e8dc749ee1\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.117854 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle\") pod \"1bebbd2e-a315-493d-820b-69e8dc749ee1\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.118454 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data\") pod \"1bebbd2e-a315-493d-820b-69e8dc749ee1\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.118550 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1bebbd2e-a315-493d-820b-69e8dc749ee1" (UID: "1bebbd2e-a315-493d-820b-69e8dc749ee1"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.119241 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl6sj\" (UniqueName: \"kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj\") pod \"1bebbd2e-a315-493d-820b-69e8dc749ee1\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.119313 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs\") pod \"1bebbd2e-a315-493d-820b-69e8dc749ee1\" (UID: \"1bebbd2e-a315-493d-820b-69e8dc749ee1\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.119458 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpwcq\" (UniqueName: \"kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq\") pod \"f360ed99-6efd-4281-a18b-7597df5341b6\" (UID: \"f360ed99-6efd-4281-a18b-7597df5341b6\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.119649 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data" (OuterVolumeSpecName: "config-data") pod "1bebbd2e-a315-493d-820b-69e8dc749ee1" (UID: "1bebbd2e-a315-493d-820b-69e8dc749ee1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.121612 5055 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.125552 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj" (OuterVolumeSpecName: "kube-api-access-wl6sj") pod "1bebbd2e-a315-493d-820b-69e8dc749ee1" (UID: "1bebbd2e-a315-493d-820b-69e8dc749ee1"). InnerVolumeSpecName "kube-api-access-wl6sj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.127299 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1bebbd2e-a315-493d-820b-69e8dc749ee1-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.130269 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq" (OuterVolumeSpecName: "kube-api-access-jpwcq") pod "f360ed99-6efd-4281-a18b-7597df5341b6" (UID: "f360ed99-6efd-4281-a18b-7597df5341b6"). InnerVolumeSpecName "kube-api-access-jpwcq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.136954 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.156850 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.173313 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9f95b955d-vptlr"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.177723 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1bebbd2e-a315-493d-820b-69e8dc749ee1" (UID: "1bebbd2e-a315-493d-820b-69e8dc749ee1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.185903 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "1bebbd2e-a315-493d-820b-69e8dc749ee1" (UID: "1bebbd2e-a315-493d-820b-69e8dc749ee1"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.196642 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9f95b955d-vptlr"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.204353 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0372a-account-delete-68qnz"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.216004 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0372a-account-delete-68qnz"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.222328 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-66cb796856-btqn8" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.154:9311/healthcheck\": dial tcp 10.217.0.154:9311: connect: connection refused" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.222797 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-66cb796856-btqn8" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.154:9311/healthcheck\": dial tcp 10.217.0.154:9311: connect: connection refused" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.228530 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpwcq\" (UniqueName: \"kubernetes.io/projected/f360ed99-6efd-4281-a18b-7597df5341b6-kube-api-access-jpwcq\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.228676 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.228689 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl6sj\" (UniqueName: \"kubernetes.io/projected/1bebbd2e-a315-493d-820b-69e8dc749ee1-kube-api-access-wl6sj\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc 
kubenswrapper[5055]: I1011 07:17:53.228704 5055 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/1bebbd2e-a315-493d-820b-69e8dc749ee1-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.246652 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8 is running failed: container process not found" containerID="cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.247004 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8 is running failed: container process not found" containerID="cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.247280 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8 is running failed: container process not found" containerID="cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.247312 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerName="nova-scheduler-scheduler" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.351500 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.382826 5055 scope.go:117] "RemoveContainer" containerID="0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.386556 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.398632 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.408717 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.426540 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.429915 5055 scope.go:117] "RemoveContainer" containerID="6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.430972 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1\": container with ID starting with 6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1 not found: ID does not exist" containerID="6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.431006 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1"} err="failed to get container status \"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1\": rpc error: code = NotFound desc = could not find container \"6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1\": container with ID starting with 6c92db08f0560ec22ab28b315f7ec817b7f7ae5ff9b9adbe69e0c475a691fdb1 not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.431031 5055 scope.go:117] "RemoveContainer" containerID="0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.432251 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4\": container with ID starting with 0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4 not found: ID does not exist" containerID="0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.432275 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4"} err="failed to get container status \"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4\": rpc error: code = NotFound desc = could not find container \"0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4\": container with ID starting with 0365fb3975bac4375c5ba4b06bb7798b9630bb637d5998b7defd097577486cc4 not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.432289 5055 scope.go:117] "RemoveContainer" containerID="21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433472 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433531 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433571 5055 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle\") pod \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433596 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmjln\" (UniqueName: \"kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433614 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433642 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433660 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433677 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config\") pod \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433707 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433735 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttqs6\" (UniqueName: \"kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6\") pod \"6a52d356-2098-4a61-be67-5519a2d2b78f\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433751 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433789 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433820 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-8bddg\" (UniqueName: \"kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433838 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433854 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data\") pod \"6a52d356-2098-4a61-be67-5519a2d2b78f\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433871 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxnp9\" (UniqueName: \"kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9\") pod \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433916 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433950 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433968 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.433985 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle\") pod \"6a52d356-2098-4a61-be67-5519a2d2b78f\" (UID: \"6a52d356-2098-4a61-be67-5519a2d2b78f\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434022 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434054 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom\") pod \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\" (UID: \"8950328a-3275-4ba9-8bd8-ea4940f2eaac\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434094 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs\") pod \"71344487-c426-47fe-85cb-927c97465a5b\" (UID: \"71344487-c426-47fe-85cb-927c97465a5b\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434111 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs\") pod \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\" (UID: \"af2ab03e-3011-4d6c-bbd5-ac5453d7785d\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434197 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.434491 5055 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8950328a-3275-4ba9-8bd8-ea4940f2eaac-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.440310 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.451846 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.462066 5055 scope.go:117] "RemoveContainer" containerID="21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.471254 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1\": container with ID starting with 21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1 not found: ID does not exist" containerID="21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.471297 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1"} err="failed to get container status \"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1\": rpc error: code = NotFound desc = could not find container \"21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1\": container with ID starting with 21e1fddac948ab938719326e70c908df0e32494cb87949b6a596230d110665c1 not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.471320 5055 scope.go:117] "RemoveContainer" containerID="5b7de1ee0c33bc352a3efec430407c4b7fc6aa49d8e39cef8efbd11931ec62fb" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.475173 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs" (OuterVolumeSpecName: "logs") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.478072 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.478344 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs" (OuterVolumeSpecName: "logs") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.485068 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6" (OuterVolumeSpecName: "kube-api-access-ttqs6") pod "6a52d356-2098-4a61-be67-5519a2d2b78f" (UID: "6a52d356-2098-4a61-be67-5519a2d2b78f"). InnerVolumeSpecName "kube-api-access-ttqs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.486814 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.520494 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts" (OuterVolumeSpecName: "scripts") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.520547 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts" (OuterVolumeSpecName: "scripts") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.523708 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.525377 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln" (OuterVolumeSpecName: "kube-api-access-lmjln") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "kube-api-access-lmjln". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.525904 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9" (OuterVolumeSpecName: "kube-api-access-rxnp9") pod "af2ab03e-3011-4d6c-bbd5-ac5453d7785d" (UID: "af2ab03e-3011-4d6c-bbd5-ac5453d7785d"). InnerVolumeSpecName "kube-api-access-rxnp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.526085 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.526871 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg" (OuterVolumeSpecName: "kube-api-access-8bddg") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "kube-api-access-8bddg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.528455 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi9573-account-delete-4cwkv" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.535945 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hw6mx\" (UniqueName: \"kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx\") pod \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536007 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536042 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom\") pod \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536073 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle\") pod \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536093 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536109 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data\") pod \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\" (UID: \"e2e0c881-0205-4317-ba03-cfb76f0f69e6\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536148 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536171 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle\") pod \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536193 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536232 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: 
\"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536329 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs\") pod \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536392 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536430 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536462 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data\") pod \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536485 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwj4r\" (UniqueName: \"kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r\") pod \"50ae0390-2ce4-47fe-88af-831aee265a6c\" (UID: \"50ae0390-2ce4-47fe-88af-831aee265a6c\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536535 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr6n5\" (UniqueName: \"kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5\") pod \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\" (UID: \"0bd3f46f-87e3-42e7-a37d-e746fb601f60\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536601 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.536960 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537211 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537236 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537249 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537259 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537267 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmjln\" (UniqueName: \"kubernetes.io/projected/8950328a-3275-4ba9-8bd8-ea4940f2eaac-kube-api-access-lmjln\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537275 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8950328a-3275-4ba9-8bd8-ea4940f2eaac-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537283 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/71344487-c426-47fe-85cb-927c97465a5b-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537292 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537300 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttqs6\" (UniqueName: \"kubernetes.io/projected/6a52d356-2098-4a61-be67-5519a2d2b78f-kube-api-access-ttqs6\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537319 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537329 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bddg\" (UniqueName: \"kubernetes.io/projected/71344487-c426-47fe-85cb-927c97465a5b-kube-api-access-8bddg\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537339 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxnp9\" (UniqueName: \"kubernetes.io/projected/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-api-access-rxnp9\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537347 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 
07:17:53.537395 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs" (OuterVolumeSpecName: "logs") pod "0bd3f46f-87e3-42e7-a37d-e746fb601f60" (UID: "0bd3f46f-87e3-42e7-a37d-e746fb601f60"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.537574 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs" (OuterVolumeSpecName: "logs") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.555780 5055 scope.go:117] "RemoveContainer" containerID="5433269dd3db71b29ff92097b8c5e94a07217bab29089e9289f976033732e76a" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.561238 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0bd3f46f-87e3-42e7-a37d-e746fb601f60" (UID: "0bd3f46f-87e3-42e7-a37d-e746fb601f60"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.566940 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.577272 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx" (OuterVolumeSpecName: "kube-api-access-hw6mx") pod "e2e0c881-0205-4317-ba03-cfb76f0f69e6" (UID: "e2e0c881-0205-4317-ba03-cfb76f0f69e6"). InnerVolumeSpecName "kube-api-access-hw6mx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.580412 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5" (OuterVolumeSpecName: "kube-api-access-pr6n5") pod "0bd3f46f-87e3-42e7-a37d-e746fb601f60" (UID: "0bd3f46f-87e3-42e7-a37d-e746fb601f60"). InnerVolumeSpecName "kube-api-access-pr6n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.582189 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r" (OuterVolumeSpecName: "kube-api-access-xwj4r") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "kube-api-access-xwj4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.584206 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.584392 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts" (OuterVolumeSpecName: "scripts") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.609273 5055 scope.go:117] "RemoveContainer" containerID="20eedda3bec3e7043f4bf711e2094fda9e39ca41bced8fbfd357cf964974b401" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.614010 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.637885 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.637929 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle\") pod \"056ae819-c243-4aa7-8214-1432f198dcab\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.637954 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.637987 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs\") pod \"056ae819-c243-4aa7-8214-1432f198dcab\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638043 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638099 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638117 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638135 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts\") pod 
\"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638158 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lthfp\" (UniqueName: \"kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp\") pod \"056ae819-c243-4aa7-8214-1432f198dcab\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638231 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjdwx\" (UniqueName: \"kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638269 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638397 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638418 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638452 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638475 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638542 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638573 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs\") pod \"056ae819-c243-4aa7-8214-1432f198dcab\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638604 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data\") pod 
\"056ae819-c243-4aa7-8214-1432f198dcab\" (UID: \"056ae819-c243-4aa7-8214-1432f198dcab\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638640 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zc4n\" (UniqueName: \"kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n\") pod \"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241\" (UID: \"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638665 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8d8rc\" (UniqueName: \"kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc\") pod \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\" (UID: \"324f6d50-368a-4f8c-8cee-4a9ff543ba31\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.638683 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle\") pod \"3cc22641-a524-415a-8377-580664ed5d90\" (UID: \"3cc22641-a524-415a-8377-580664ed5d90\") " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639000 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50ae0390-2ce4-47fe-88af-831aee265a6c-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639015 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bd3f46f-87e3-42e7-a37d-e746fb601f60-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639023 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639032 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwj4r\" (UniqueName: \"kubernetes.io/projected/50ae0390-2ce4-47fe-88af-831aee265a6c-kube-api-access-xwj4r\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639042 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr6n5\" (UniqueName: \"kubernetes.io/projected/0bd3f46f-87e3-42e7-a37d-e746fb601f60-kube-api-access-pr6n5\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639051 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hw6mx\" (UniqueName: \"kubernetes.io/projected/e2e0c881-0205-4317-ba03-cfb76f0f69e6-kube-api-access-hw6mx\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639069 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639079 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.639624 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd" (OuterVolumeSpecName: "run-httpd") 
pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.640223 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.640816 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs" (OuterVolumeSpecName: "logs") pod "056ae819-c243-4aa7-8214-1432f198dcab" (UID: "056ae819-c243-4aa7-8214-1432f198dcab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.643837 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.643916 5055 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data podName:6baacc00-a270-4662-ba67-aad18287df2c nodeName:}" failed. No retries permitted until 2025-10-11 07:18:01.643895215 +0000 UTC m=+1465.418169092 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data") pod "rabbitmq-cell1-server-0" (UID: "6baacc00-a270-4662-ba67-aad18287df2c") : configmap "rabbitmq-cell1-config-data" not found Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.644677 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs" (OuterVolumeSpecName: "logs") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.654095 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx" (OuterVolumeSpecName: "kube-api-access-sjdwx") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "kube-api-access-sjdwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.654621 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.657675 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.664582 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc" (OuterVolumeSpecName: "kube-api-access-8d8rc") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "kube-api-access-8d8rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.664702 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts" (OuterVolumeSpecName: "scripts") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.667004 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp" (OuterVolumeSpecName: "kube-api-access-lthfp") pod "056ae819-c243-4aa7-8214-1432f198dcab" (UID: "056ae819-c243-4aa7-8214-1432f198dcab"). InnerVolumeSpecName "kube-api-access-lthfp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.667931 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n" (OuterVolumeSpecName: "kube-api-access-7zc4n") pod "4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" (UID: "4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241"). InnerVolumeSpecName "kube-api-access-7zc4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.679994 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.697346 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a52d356-2098-4a61-be67-5519a2d2b78f" (UID: "6a52d356-2098-4a61-be67-5519a2d2b78f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.704379 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bd3f46f-87e3-42e7-a37d-e746fb601f60" (UID: "0bd3f46f-87e3-42e7-a37d-e746fb601f60"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.711448 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af2ab03e-3011-4d6c-bbd5-ac5453d7785d" (UID: "af2ab03e-3011-4d6c-bbd5-ac5453d7785d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.718612 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "056ae819-c243-4aa7-8214-1432f198dcab" (UID: "056ae819-c243-4aa7-8214-1432f198dcab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.739840 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zc4n\" (UniqueName: \"kubernetes.io/projected/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241-kube-api-access-7zc4n\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740085 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8d8rc\" (UniqueName: \"kubernetes.io/projected/324f6d50-368a-4f8c-8cee-4a9ff543ba31-kube-api-access-8d8rc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740096 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740104 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/324f6d50-368a-4f8c-8cee-4a9ff543ba31-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740116 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740124 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/056ae819-c243-4aa7-8214-1432f198dcab-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740132 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740141 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740149 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740157 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740166 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740174 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lthfp\" (UniqueName: \"kubernetes.io/projected/056ae819-c243-4aa7-8214-1432f198dcab-kube-api-access-lthfp\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740182 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjdwx\" (UniqueName: \"kubernetes.io/projected/3cc22641-a524-415a-8377-580664ed5d90-kube-api-access-sjdwx\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740190 5055 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740199 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.740206 5055 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3cc22641-a524-415a-8377-580664ed5d90-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.743856 5055 generic.go:334] "Generic (PLEG): container finished" podID="b6c56af9-552f-4b5a-9648-f28af1328534" containerID="5df485ac2f70f05a177e6e141ec2dc723c3f8a0fa7ae8342e45af4f8db485f9b" exitCode=0 Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.743911 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerDied","Data":"5df485ac2f70f05a177e6e141ec2dc723c3f8a0fa7ae8342e45af4f8db485f9b"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.745205 5055 generic.go:334] "Generic (PLEG): container finished" podID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerID="b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648" exitCode=0 Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.745242 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerDied","Data":"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.745258 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-66cb796856-btqn8" event={"ID":"324f6d50-368a-4f8c-8cee-4a9ff543ba31","Type":"ContainerDied","Data":"84f912d588b362be8f4dff4b2de01bd13e69ccf55752b1ce067cbae1a93601ce"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.745310 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-66cb796856-btqn8" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.749848 5055 scope.go:117] "RemoveContainer" containerID="82a484cebdaeca44b956dd6026c1a5e65bf7f6fa2ec01d1293e086f33d6250f6" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.752382 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8950328a-3275-4ba9-8bd8-ea4940f2eaac","Type":"ContainerDied","Data":"f8000c8b2ce09867da6d5d6e824df4d623da61f2d0adbe03de44c59de1e0a250"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.752398 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.755421 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2e0c881-0205-4317-ba03-cfb76f0f69e6" (UID: "e2e0c881-0205-4317-ba03-cfb76f0f69e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.755789 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "af2ab03e-3011-4d6c-bbd5-ac5453d7785d" (UID: "af2ab03e-3011-4d6c-bbd5-ac5453d7785d"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.757136 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fffd4488c-r5vkt" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.757399 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffd4488c-r5vkt" event={"ID":"0bd3f46f-87e3-42e7-a37d-e746fb601f60","Type":"ContainerDied","Data":"8f011a1e8c65e1d3151d79b7e649db7839c472218f188896199cf1e02f7d55de"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.764023 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"71344487-c426-47fe-85cb-927c97465a5b","Type":"ContainerDied","Data":"e2f4befcbc237d4c2d0f0d68a3e7207cb887cd5c2506e76bffacbd7d4b7c9663"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.764098 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.768009 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi9573-account-delete-4cwkv" event={"ID":"4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241","Type":"ContainerDied","Data":"21b9692b298274e8629ab582e9f1919ba611ba7e5957cd74e6760c445854cef3"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.768044 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21b9692b298274e8629ab582e9f1919ba611ba7e5957cd74e6760c445854cef3" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.768087 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi9573-account-delete-4cwkv" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.772173 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data" (OuterVolumeSpecName: "config-data") pod "e2e0c881-0205-4317-ba03-cfb76f0f69e6" (UID: "e2e0c881-0205-4317-ba03-cfb76f0f69e6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.776625 5055 scope.go:117] "RemoveContainer" containerID="8d5ba7c659e482e9feef4958452dbc1a0c5e75bab6d21a07960f5948ca9ca733" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.776830 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"af2ab03e-3011-4d6c-bbd5-ac5453d7785d","Type":"ContainerDied","Data":"4225837af8e9089ccaa3d6d36348d97283f9a841dd3565bf33d45126d9944173"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.776903 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.779063 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.780195 5055 generic.go:334] "Generic (PLEG): container finished" podID="056ae819-c243-4aa7-8214-1432f198dcab" containerID="4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d" exitCode=0 Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.780254 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerDied","Data":"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.780259 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.780273 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"056ae819-c243-4aa7-8214-1432f198dcab","Type":"ContainerDied","Data":"6a920aec6d20b4cebe6fa23903df2f90d9534ebdb36d531aa902730ca0b0408e"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.784753 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"50ae0390-2ce4-47fe-88af-831aee265a6c","Type":"ContainerDied","Data":"8bd668e519389fb3424ea402a537236d71ec966542274730f7043c399964922a"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.784945 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.786537 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.791977 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.792201 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6a52d356-2098-4a61-be67-5519a2d2b78f","Type":"ContainerDied","Data":"e743b4fba078ddd6b6a47cf961b951770d0460a80991f775d631a8a09c51cbf1"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.794885 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.795448 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.804024 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3cc22641-a524-415a-8377-580664ed5d90","Type":"ContainerDied","Data":"e93036b1c1f05ad35709c2b098240496f0383f66af42c47a4063ed8587f8d4ed"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.804101 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.811815 5055 scope.go:117] "RemoveContainer" containerID="b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.817991 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.819450 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data" (OuterVolumeSpecName: "config-data") pod "6a52d356-2098-4a61-be67-5519a2d2b78f" (UID: "6a52d356-2098-4a61-be67-5519a2d2b78f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.819523 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.819557 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e2e0c881-0205-4317-ba03-cfb76f0f69e6","Type":"ContainerDied","Data":"1d574623ada8580564af4480ec8905f977d1293c996b9e7d728ea9ba7d1939f9"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.828380 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.829802 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi9573-account-delete-4cwkv"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.836106 5055 generic.go:334] "Generic (PLEG): container finished" podID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerID="07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2" exitCode=0 Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.836182 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerDied","Data":"07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2"} Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843885 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843915 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843927 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2e0c881-0205-4317-ba03-cfb76f0f69e6-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843942 5055 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843953 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843963 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843973 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a52d356-2098-4a61-be67-5519a2d2b78f-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843985 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.843993 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.845581 5055 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.845639 5055 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data podName:80597a79-e3fd-41cd-b035-a35494775fcb nodeName:}" failed. No retries permitted until 2025-10-11 07:18:01.845613955 +0000 UTC m=+1465.619887762 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data") pod "rabbitmq-server-0" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb") : configmap "rabbitmq-config-data" not found Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.848994 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance5ef3-account-delete-px4qv" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.849321 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.862560 5055 scope.go:117] "RemoveContainer" containerID="6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.899137 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.904816 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.904831 5055 scope.go:117] "RemoveContainer" containerID="b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.907403 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648\": container with ID starting with b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648 not found: ID does not exist" containerID="b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.907441 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648"} err="failed to get container status \"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648\": rpc error: code = NotFound desc = could not find container \"b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648\": container with ID starting with b032f4e2acda19474410a349d97f1f7434164e628aa86411ceb38dcda72e0648 not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.907467 5055 scope.go:117] "RemoveContainer" containerID="6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c" Oct 11 07:17:53 crc kubenswrapper[5055]: E1011 07:17:53.907796 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c\": container with ID starting with 6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c not found: ID does not exist" containerID="6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c" Oct 
11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.907825 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c"} err="failed to get container status \"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c\": rpc error: code = NotFound desc = could not find container \"6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c\": container with ID starting with 6bdcd9c1fc8e6d8238ec30f7f0ec872804f3202b031d16214896ea2c72adfe7c not found: ID does not exist" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.907841 5055 scope.go:117] "RemoveContainer" containerID="ce9f9d6bb4b08df1523515f638c8c0fd3d6d8d86faabc0808907511f9e7af1e3" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.938552 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.940963 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.945086 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.945113 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.949385 5055 scope.go:117] "RemoveContainer" containerID="3d5aad7ccd6e2d170e5b68480b570aef7d997fdd46672f0bca2c424d9434d752" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.958349 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.970884 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance5ef3-account-delete-px4qv"] Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.974384 5055 scope.go:117] "RemoveContainer" containerID="161a466facbf08e74bf08d457e0525bdf9302acd69cb47d57a131bf40297c7a8" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.980995 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data" (OuterVolumeSpecName: "config-data") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.987960 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.994472 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "af2ab03e-3011-4d6c-bbd5-ac5453d7785d" (UID: "af2ab03e-3011-4d6c-bbd5-ac5453d7785d"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.996075 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:53 crc kubenswrapper[5055]: I1011 07:17:53.996126 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.000492 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data" (OuterVolumeSpecName: "config-data") pod "8950328a-3275-4ba9-8bd8-ea4940f2eaac" (UID: "8950328a-3275-4ba9-8bd8-ea4940f2eaac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.002876 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "056ae819-c243-4aa7-8214-1432f198dcab" (UID: "056ae819-c243-4aa7-8214-1432f198dcab"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.003487 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.003858 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.005896 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data" (OuterVolumeSpecName: "config-data") pod "056ae819-c243-4aa7-8214-1432f198dcab" (UID: "056ae819-c243-4aa7-8214-1432f198dcab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.011078 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.025907 5055 scope.go:117] "RemoveContainer" containerID="5b162a0640d55adbc95b5344fee3401f099e891f722bbcd511184a279d6fc712" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.029568 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data" (OuterVolumeSpecName: "config-data") pod "324f6d50-368a-4f8c-8cee-4a9ff543ba31" (UID: "324f6d50-368a-4f8c-8cee-4a9ff543ba31"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.038359 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data" (OuterVolumeSpecName: "config-data") pod "71344487-c426-47fe-85cb-927c97465a5b" (UID: "71344487-c426-47fe-85cb-927c97465a5b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.047684 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data" (OuterVolumeSpecName: "config-data") pod "0bd3f46f-87e3-42e7-a37d-e746fb601f60" (UID: "0bd3f46f-87e3-42e7-a37d-e746fb601f60"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050317 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050348 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050362 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050374 5055 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050385 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/324f6d50-368a-4f8c-8cee-4a9ff543ba31-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050397 5055 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050408 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bd3f46f-87e3-42e7-a37d-e746fb601f60-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.050420 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/71344487-c426-47fe-85cb-927c97465a5b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.051636 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8950328a-3275-4ba9-8bd8-ea4940f2eaac-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.052817 5055 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.052836 5055 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/af2ab03e-3011-4d6c-bbd5-ac5453d7785d-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.052950 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/056ae819-c243-4aa7-8214-1432f198dcab-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.053974 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.054864 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "50ae0390-2ce4-47fe-88af-831aee265a6c" (UID: "50ae0390-2ce4-47fe-88af-831aee265a6c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.055782 5055 scope.go:117] "RemoveContainer" containerID="8445d4a2e1d0c3a546cd9368a1877165d4bf7e98c38114091de01f3e6ca9a695" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.109388 5055 scope.go:117] "RemoveContainer" containerID="fa24be4ccfc52d857c58171af39de8465cca261f4d044114005b686c174d38c4" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.117348 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data" (OuterVolumeSpecName: "config-data") pod "3cc22641-a524-415a-8377-580664ed5d90" (UID: "3cc22641-a524-415a-8377-580664ed5d90"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.153954 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154005 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154040 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154062 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154085 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154314 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/50ae0390-2ce4-47fe-88af-831aee265a6c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.154326 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cc22641-a524-415a-8377-580664ed5d90-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.155796 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.158652 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.159618 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.161176 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets" (OuterVolumeSpecName: "secrets") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.175683 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.182684 5055 scope.go:117] "RemoveContainer" containerID="2e4ad5facb5f08bffeaad25b16cd84fad4909f52d97b67679d95970d047e100f" Oct 11 07:17:54 crc kubenswrapper[5055]: E1011 07:17:54.198298 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:54 crc kubenswrapper[5055]: E1011 07:17:54.199974 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:54 crc kubenswrapper[5055]: E1011 07:17:54.201274 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Oct 11 07:17:54 crc kubenswrapper[5055]: E1011 07:17:54.201298 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerName="nova-cell0-conductor-conductor" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.254883 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.254939 5055 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255021 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkx6v\" (UniqueName: \"kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255061 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"b6c56af9-552f-4b5a-9648-f28af1328534\" (UID: \"b6c56af9-552f-4b5a-9648-f28af1328534\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255469 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-generated\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255495 5055 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-kolla-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255508 5055 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-operator-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255519 5055 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-secrets\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.255549 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.256321 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.262070 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v" (OuterVolumeSpecName: "kube-api-access-dkx6v") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "kube-api-access-dkx6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.265353 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "mysql-db") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). 
InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.334598 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "b6c56af9-552f-4b5a-9648-f28af1328534" (UID: "b6c56af9-552f-4b5a-9648-f28af1328534"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.357061 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b6c56af9-552f-4b5a-9648-f28af1328534-config-data-default\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.357114 5055 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c56af9-552f-4b5a-9648-f28af1328534-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.357124 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkx6v\" (UniqueName: \"kubernetes.io/projected/b6c56af9-552f-4b5a-9648-f28af1328534-kube-api-access-dkx6v\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.357154 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.376817 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.458992 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.552571 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.558424 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.562384 5055 scope.go:117] "RemoveContainer" containerID="4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.567333 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.572910 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.578135 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7fffd4488c-r5vkt"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.647869 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.657320 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661336 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661427 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661517 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661542 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661564 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsvdl\" (UniqueName: \"kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.661609 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs\") pod \"451f7ab0-0020-4931-a42f-f56eb5b28201\" (UID: \"451f7ab0-0020-4931-a42f-f56eb5b28201\") " Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.662424 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs" (OuterVolumeSpecName: "logs") pod 
"451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.668178 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.673805 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.680939 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl" (OuterVolumeSpecName: "kube-api-access-tsvdl") pod "451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "kube-api-access-tsvdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.692816 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.703501 5055 scope.go:117] "RemoveContainer" containerID="2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.717697 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-66cb796856-btqn8"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.737239 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.749831 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.756309 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.759697 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.759848 5055 scope.go:117] "RemoveContainer" containerID="4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d" Oct 11 07:17:54 crc kubenswrapper[5055]: E1011 07:17:54.760434 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d\": container with ID starting with 4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d not found: ID does not exist" containerID="4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.760464 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d"} err="failed to get container status \"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d\": rpc error: code = NotFound desc = could not find container \"4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d\": container with ID starting with 4e2bb178a97af21102f507810962f80618de3fea812c149a4cec5443c0f81b8d not found: ID does not exist" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.760486 5055 scope.go:117] "RemoveContainer" containerID="2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9" Oct 11 07:17:54 crc 
kubenswrapper[5055]: E1011 07:17:54.760716 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9\": container with ID starting with 2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9 not found: ID does not exist" containerID="2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.760749 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9"} err="failed to get container status \"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9\": rpc error: code = NotFound desc = could not find container \"2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9\": container with ID starting with 2a3c3a9e15d6aecd7cda27492c58fef072ebf992a74d0cfa671a938bba40e6d9 not found: ID does not exist" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.760785 5055 scope.go:117] "RemoveContainer" containerID="d7f9eac5a0e6f11b97625c4fa2e0f22db3b2a7d4736d79f64658b64c84d7459c" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.763319 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsvdl\" (UniqueName: \"kubernetes.io/projected/451f7ab0-0020-4931-a42f-f56eb5b28201-kube-api-access-tsvdl\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.763339 5055 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/451f7ab0-0020-4931-a42f-f56eb5b28201-logs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.765518 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.770188 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.783727 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.787987 5055 scope.go:117] "RemoveContainer" containerID="c1d53b317d7b7a6df70c5f5efdb8793f32b63b9f67d43aa9d015d200b5d8bac5" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.795051 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.812958 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data" (OuterVolumeSpecName: "config-data") pod "451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.825080 5055 scope.go:117] "RemoveContainer" containerID="fb731c00bae126ce96a2762c9416854d8181fa8097933c9a989d30dcc1d84d82" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.828154 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.829057 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.845326 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "451f7ab0-0020-4931-a42f-f56eb5b28201" (UID: "451f7ab0-0020-4931-a42f-f56eb5b28201"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.860884 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerStarted","Data":"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.864885 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.864918 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.864929 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.864961 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/451f7ab0-0020-4931-a42f-f56eb5b28201-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.869386 5055 generic.go:334] "Generic (PLEG): container finished" podID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerID="d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab" exitCode=0 Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.869515 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.869552 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerDied","Data":"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.869594 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"451f7ab0-0020-4931-a42f-f56eb5b28201","Type":"ContainerDied","Data":"6112632e87c1745d89d18d736d80fbc7beda702b35d0db0b2ea2edef427f7a56"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.885277 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zpjhx" podStartSLOduration=5.19645346 podStartE2EDuration="6.88526374s" podCreationTimestamp="2025-10-11 07:17:48 +0000 UTC" firstStartedPulling="2025-10-11 07:17:52.644077278 +0000 UTC m=+1456.418351085" lastFinishedPulling="2025-10-11 07:17:54.332887558 +0000 UTC m=+1458.107161365" observedRunningTime="2025-10-11 07:17:54.882551593 +0000 UTC m=+1458.656825400" watchObservedRunningTime="2025-10-11 07:17:54.88526374 +0000 UTC m=+1458.659537547" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.909995 5055 scope.go:117] "RemoveContainer" containerID="c87bdbf1b6954a66227351f91bbd585daf3ee9facdce59279624e5a7379654be" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.920232 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_86907505-2c19-41ed-b72d-0b8bbeab1eec/ovn-northd/0.log" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.920269 5055 generic.go:334] "Generic (PLEG): container finished" podID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" exitCode=139 Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.920317 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerDied","Data":"85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.930283 5055 generic.go:334] "Generic (PLEG): container finished" podID="5108504f-f2dd-4f43-8d7a-f630e055d661" containerID="e8e6bf3ea958d93967bb08adda0bad8a6b27f850a70be0cd9489070df6fd455e" exitCode=0 Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.930423 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-867958d55b-prp6q" event={"ID":"5108504f-f2dd-4f43-8d7a-f630e055d661","Type":"ContainerDied","Data":"e8e6bf3ea958d93967bb08adda0bad8a6b27f850a70be0cd9489070df6fd455e"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.935615 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b6c56af9-552f-4b5a-9648-f28af1328534","Type":"ContainerDied","Data":"fabd2926d4dcd664ef9ed674710907fa694b3f741c421638b2feeca778cdbc3d"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.935692 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.938388 5055 generic.go:334] "Generic (PLEG): container finished" podID="6baacc00-a270-4662-ba67-aad18287df2c" containerID="afc6ef0c5aad5fc6c62e67db33d9a72dc22d3677a055603cfeadcd9d74609515" exitCode=0 Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.938500 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerDied","Data":"afc6ef0c5aad5fc6c62e67db33d9a72dc22d3677a055603cfeadcd9d74609515"} Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.945712 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.950167 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.956330 5055 scope.go:117] "RemoveContainer" containerID="919ef65eeeea598c7b3f2a94cda4ed6f77049d1b185e1b4613c3eaa5ae3f9970" Oct 11 07:17:54 crc kubenswrapper[5055]: I1011 07:17:54.974919 5055 scope.go:117] "RemoveContainer" containerID="bbcfbd58af515683c0fa41f7883c1636fcf3f11322b07edf17837f0776642867" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.022324 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" path="/var/lib/kubelet/pods/01de7a83-4a08-494a-a3b9-774e6787e30f/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.023163 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01e71e0c-56f0-4bfc-bd58-501785a5d235" path="/var/lib/kubelet/pods/01e71e0c-56f0-4bfc-bd58-501785a5d235/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.023714 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="056ae819-c243-4aa7-8214-1432f198dcab" path="/var/lib/kubelet/pods/056ae819-c243-4aa7-8214-1432f198dcab/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.027367 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" path="/var/lib/kubelet/pods/0bd3f46f-87e3-42e7-a37d-e746fb601f60/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.027959 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bebbd2e-a315-493d-820b-69e8dc749ee1" path="/var/lib/kubelet/pods/1bebbd2e-a315-493d-820b-69e8dc749ee1/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.032991 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" path="/var/lib/kubelet/pods/324f6d50-368a-4f8c-8cee-4a9ff543ba31/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.034179 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cc22641-a524-415a-8377-580664ed5d90" path="/var/lib/kubelet/pods/3cc22641-a524-415a-8377-580664ed5d90/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.034980 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" path="/var/lib/kubelet/pods/451f7ab0-0020-4931-a42f-f56eb5b28201/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.035647 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" path="/var/lib/kubelet/pods/4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241/volumes" Oct 11 07:17:55 crc 
kubenswrapper[5055]: I1011 07:17:55.036752 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" path="/var/lib/kubelet/pods/50ae0390-2ce4-47fe-88af-831aee265a6c/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.037278 5055 scope.go:117] "RemoveContainer" containerID="e7de0a43fb54dfe049156667e0b998705b9e78b7bff9600f863738f69bbff3b9" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.037650 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" path="/var/lib/kubelet/pods/6a52d356-2098-4a61-be67-5519a2d2b78f/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.038657 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71344487-c426-47fe-85cb-927c97465a5b" path="/var/lib/kubelet/pods/71344487-c426-47fe-85cb-927c97465a5b/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.039366 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" path="/var/lib/kubelet/pods/8950328a-3275-4ba9-8bd8-ea4940f2eaac/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.040469 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" path="/var/lib/kubelet/pods/af2ab03e-3011-4d6c-bbd5-ac5453d7785d/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.041735 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" path="/var/lib/kubelet/pods/c7e77464-d0e5-4a9b-97a1-557cac810654/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.043193 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" path="/var/lib/kubelet/pods/e2e0c881-0205-4317-ba03-cfb76f0f69e6/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.043647 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f360ed99-6efd-4281-a18b-7597df5341b6" path="/var/lib/kubelet/pods/f360ed99-6efd-4281-a18b-7597df5341b6/volumes" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.044294 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.044317 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.097220 5055 scope.go:117] "RemoveContainer" containerID="cf5f42235a42f219aee9d395cf87f7e377aaf80da0d2626e197e998f6d477ba8" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.129853 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.167376 5055 scope.go:117] "RemoveContainer" containerID="d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.264083 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278269 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278342 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278373 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278397 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278418 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmtr8\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278445 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278517 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278589 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278632 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278658 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: 
\"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.278684 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf\") pod \"6baacc00-a270-4662-ba67-aad18287df2c\" (UID: \"6baacc00-a270-4662-ba67-aad18287df2c\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.285211 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.292808 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.295946 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8" (OuterVolumeSpecName: "kube-api-access-cmtr8") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "kube-api-access-cmtr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.296445 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.299229 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.300972 5055 scope.go:117] "RemoveContainer" containerID="46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.301500 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.313809 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.314276 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info" (OuterVolumeSpecName: "pod-info") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.334732 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.335134 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.335377 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.335405 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.335883 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.340454 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.349818 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.349878 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.353669 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data" (OuterVolumeSpecName: "config-data") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.362701 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf" (OuterVolumeSpecName: "server-conf") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379621 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379691 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379744 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379864 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6nlk\" (UniqueName: \"kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379919 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.379946 5055 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380077 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380118 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs\") pod \"5108504f-f2dd-4f43-8d7a-f630e055d661\" (UID: \"5108504f-f2dd-4f43-8d7a-f630e055d661\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380473 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380498 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380512 5055 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6baacc00-a270-4662-ba67-aad18287df2c-pod-info\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380522 5055 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380532 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380542 5055 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6baacc00-a270-4662-ba67-aad18287df2c-server-conf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380553 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380563 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmtr8\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-kube-api-access-cmtr8\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380573 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.380583 5055 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/6baacc00-a270-4662-ba67-aad18287df2c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.383863 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.386962 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.390286 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts" (OuterVolumeSpecName: "scripts") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.391665 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk" (OuterVolumeSpecName: "kube-api-access-x6nlk") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "kube-api-access-x6nlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.405452 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.416587 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data" (OuterVolumeSpecName: "config-data") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.419273 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.425206 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_86907505-2c19-41ed-b72d-0b8bbeab1eec/ovn-northd/0.log" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.425464 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.429558 5055 scope.go:117] "RemoveContainer" containerID="d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab" Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.438045 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab\": container with ID starting with d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab not found: ID does not exist" containerID="d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.438112 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab"} err="failed to get container status \"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab\": rpc error: code = NotFound desc = could not find container \"d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab\": container with ID starting with d28b05014afad86588cab28904e2c797b56d4471ded7feefa065183a00b9d6ab not found: ID does not exist" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.438175 5055 scope.go:117] "RemoveContainer" containerID="46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce" Oct 11 07:17:55 crc kubenswrapper[5055]: E1011 07:17:55.438748 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce\": container with ID starting with 46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce not found: ID does not exist" containerID="46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.438803 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce"} err="failed to get container status \"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce\": rpc error: code = NotFound desc = could not find container \"46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce\": container with ID starting with 46946b80937373ad46df4157d6f6a132a04b8f37382322b6bc9b29dca6a898ce not found: ID does not exist" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.438828 5055 scope.go:117] "RemoveContainer" containerID="5df485ac2f70f05a177e6e141ec2dc723c3f8a0fa7ae8342e45af4f8db485f9b" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.440111 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "6baacc00-a270-4662-ba67-aad18287df2c" (UID: "6baacc00-a270-4662-ba67-aad18287df2c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.441423 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.457317 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.473201 5055 scope.go:117] "RemoveContainer" containerID="a36238e8a93e4627ba8d72309cd8bd57e2a2425d6a1b88bf78c70b8fff37dde5" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.473236 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5108504f-f2dd-4f43-8d7a-f630e055d661" (UID: "5108504f-f2dd-4f43-8d7a-f630e055d661"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484446 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484471 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484480 5055 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484488 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6baacc00-a270-4662-ba67-aad18287df2c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484497 5055 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484507 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484517 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6nlk\" (UniqueName: \"kubernetes.io/projected/5108504f-f2dd-4f43-8d7a-f630e055d661-kube-api-access-x6nlk\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484529 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484538 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.484551 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5108504f-f2dd-4f43-8d7a-f630e055d661-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 
07:17:55.585467 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585511 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585546 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585567 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585586 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585604 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585636 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585661 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d5kj\" (UniqueName: \"kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585677 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585697 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585731 5055 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585756 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585804 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585836 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585863 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hl52b\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585886 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585900 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf\") pod \"80597a79-e3fd-41cd-b035-a35494775fcb\" (UID: \"80597a79-e3fd-41cd-b035-a35494775fcb\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.585933 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config\") pod \"86907505-2c19-41ed-b72d-0b8bbeab1eec\" (UID: \"86907505-2c19-41ed-b72d-0b8bbeab1eec\") " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.586853 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config" (OuterVolumeSpecName: "config") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.587165 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). 
InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.587209 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.587574 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.587915 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts" (OuterVolumeSpecName: "scripts") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.589654 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b" (OuterVolumeSpecName: "kube-api-access-hl52b") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "kube-api-access-hl52b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.590112 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.592933 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj" (OuterVolumeSpecName: "kube-api-access-4d5kj") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "kube-api-access-4d5kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.594478 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info" (OuterVolumeSpecName: "pod-info") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.594495 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). 
InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.594877 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.595001 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.610245 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data" (OuterVolumeSpecName: "config-data") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.621981 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.634605 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf" (OuterVolumeSpecName: "server-conf") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.659292 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.660162 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "86907505-2c19-41ed-b72d-0b8bbeab1eec" (UID: "86907505-2c19-41ed-b72d-0b8bbeab1eec"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.682247 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "80597a79-e3fd-41cd-b035-a35494775fcb" (UID: "80597a79-e3fd-41cd-b035-a35494775fcb"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688524 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-rundir\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688557 5055 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688576 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688587 5055 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/80597a79-e3fd-41cd-b035-a35494775fcb-pod-info\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688597 5055 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-server-conf\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688607 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688636 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688648 5055 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688662 5055 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/80597a79-e3fd-41cd-b035-a35494775fcb-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688673 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/86907505-2c19-41ed-b72d-0b8bbeab1eec-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688684 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d5kj\" (UniqueName: \"kubernetes.io/projected/86907505-2c19-41ed-b72d-0b8bbeab1eec-kube-api-access-4d5kj\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688696 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688706 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688717 5055 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/86907505-2c19-41ed-b72d-0b8bbeab1eec-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688727 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688736 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/80597a79-e3fd-41cd-b035-a35494775fcb-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688747 5055 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/80597a79-e3fd-41cd-b035-a35494775fcb-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.688760 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hl52b\" (UniqueName: \"kubernetes.io/projected/80597a79-e3fd-41cd-b035-a35494775fcb-kube-api-access-hl52b\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.707962 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.790281 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.952134 5055 generic.go:334] "Generic (PLEG): container finished" podID="80597a79-e3fd-41cd-b035-a35494775fcb" containerID="f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61" exitCode=0 Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.952188 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerDied","Data":"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61"} Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.952212 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"80597a79-e3fd-41cd-b035-a35494775fcb","Type":"ContainerDied","Data":"dfd5dce990ae800c1bc52587c308c4264b7b7a1cbc5657bb76970072b00bed33"} Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.952229 5055 scope.go:117] "RemoveContainer" containerID="f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.952336 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.965169 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6baacc00-a270-4662-ba67-aad18287df2c","Type":"ContainerDied","Data":"0bfc61d188e773eec448e25619ee53677630ef0d3cab1639155d5561dcf3c97f"} Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.965582 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.973331 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_86907505-2c19-41ed-b72d-0b8bbeab1eec/ovn-northd/0.log" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.973405 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"86907505-2c19-41ed-b72d-0b8bbeab1eec","Type":"ContainerDied","Data":"7671a3c8e53a3012cc95874e8cd5aecd0cd38d3f4d1ffe9c66160a30d833a91f"} Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.973470 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.982594 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-867958d55b-prp6q" event={"ID":"5108504f-f2dd-4f43-8d7a-f630e055d661","Type":"ContainerDied","Data":"f181c4ee9b4630984c1970b1f87eb8b3233446ded580b2a186ef858f60c78ea9"} Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.985188 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-867958d55b-prp6q" Oct 11 07:17:55 crc kubenswrapper[5055]: I1011 07:17:55.994838 5055 scope.go:117] "RemoveContainer" containerID="327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.010476 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.014652 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.024574 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.028126 5055 scope.go:117] "RemoveContainer" containerID="f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.029090 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: E1011 07:17:56.033757 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61\": container with ID starting with f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61 not found: ID does not exist" containerID="f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.034044 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61"} err="failed to get container status \"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61\": rpc error: code = NotFound desc = could not find 
container \"f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61\": container with ID starting with f88b82c51e63276746015a1cc5807c62948a2cdab828c2f6b3281f634111aa61 not found: ID does not exist" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.034088 5055 scope.go:117] "RemoveContainer" containerID="327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e" Oct 11 07:17:56 crc kubenswrapper[5055]: E1011 07:17:56.034531 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e\": container with ID starting with 327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e not found: ID does not exist" containerID="327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.034559 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e"} err="failed to get container status \"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e\": rpc error: code = NotFound desc = could not find container \"327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e\": container with ID starting with 327706891e223da16a2f57a33e91fd6f8d0cf761a883a606b6cd91aaedf11f3e not found: ID does not exist" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.034580 5055 scope.go:117] "RemoveContainer" containerID="afc6ef0c5aad5fc6c62e67db33d9a72dc22d3677a055603cfeadcd9d74609515" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.041119 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.052832 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.052893 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.056885 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-867958d55b-prp6q"] Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.063932 5055 scope.go:117] "RemoveContainer" containerID="f0982b0e0f3eeaf7aa5484768477b7a1133fccf88ef1ea8df59f16ddad4fc302" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.083854 5055 scope.go:117] "RemoveContainer" containerID="5da9581c5841f93d450f53683dcf5e30ef31a1b137be5b1ebde5cd58b90187ab" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.106272 5055 scope.go:117] "RemoveContainer" containerID="85214f65e9b2ae3cca29ac8e1565c1deb2a9429c77e7931a85221321ed7db87e" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.122999 5055 scope.go:117] "RemoveContainer" containerID="e8e6bf3ea958d93967bb08adda0bad8a6b27f850a70be0cd9489070df6fd455e" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.786945 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.907819 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9fwb\" (UniqueName: \"kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb\") pod \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.908611 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data\") pod \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.908648 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle\") pod \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\" (UID: \"6ff3f14d-ff09-4533-89d6-e53afbda0dd6\") " Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.912837 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb" (OuterVolumeSpecName: "kube-api-access-c9fwb") pod "6ff3f14d-ff09-4533-89d6-e53afbda0dd6" (UID: "6ff3f14d-ff09-4533-89d6-e53afbda0dd6"). InnerVolumeSpecName "kube-api-access-c9fwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.927479 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data" (OuterVolumeSpecName: "config-data") pod "6ff3f14d-ff09-4533-89d6-e53afbda0dd6" (UID: "6ff3f14d-ff09-4533-89d6-e53afbda0dd6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:56 crc kubenswrapper[5055]: I1011 07:17:56.928944 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ff3f14d-ff09-4533-89d6-e53afbda0dd6" (UID: "6ff3f14d-ff09-4533-89d6-e53afbda0dd6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.001424 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5108504f-f2dd-4f43-8d7a-f630e055d661" path="/var/lib/kubelet/pods/5108504f-f2dd-4f43-8d7a-f630e055d661/volumes" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.002256 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6baacc00-a270-4662-ba67-aad18287df2c" path="/var/lib/kubelet/pods/6baacc00-a270-4662-ba67-aad18287df2c/volumes" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.002984 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" path="/var/lib/kubelet/pods/80597a79-e3fd-41cd-b035-a35494775fcb/volumes" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.003989 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" path="/var/lib/kubelet/pods/86907505-2c19-41ed-b72d-0b8bbeab1eec/volumes" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.004602 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" path="/var/lib/kubelet/pods/b6c56af9-552f-4b5a-9648-f28af1328534/volumes" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.009007 5055 generic.go:334] "Generic (PLEG): container finished" podID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" exitCode=0 Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.009141 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3f14d-ff09-4533-89d6-e53afbda0dd6","Type":"ContainerDied","Data":"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9"} Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.009230 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"6ff3f14d-ff09-4533-89d6-e53afbda0dd6","Type":"ContainerDied","Data":"e1eb547c910a476ce37a41d13f0ba306a1547be5b15340a4cf5986f589b3ba80"} Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.009320 5055 scope.go:117] "RemoveContainer" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.009539 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.013372 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9fwb\" (UniqueName: \"kubernetes.io/projected/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-kube-api-access-c9fwb\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.013402 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.013415 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ff3f14d-ff09-4533-89d6-e53afbda0dd6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.055234 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.059321 5055 scope.go:117] "RemoveContainer" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.059533 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 11 07:17:57 crc kubenswrapper[5055]: E1011 07:17:57.059724 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9\": container with ID starting with b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9 not found: ID does not exist" containerID="b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.059776 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9"} err="failed to get container status \"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9\": rpc error: code = NotFound desc = could not find container \"b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9\": container with ID starting with b3e9f172b79de16f3edd9ef745a8fc8ca0482cbbac8ef1d7c3f9df6dc8e119e9 not found: ID does not exist" Oct 11 07:17:57 crc kubenswrapper[5055]: I1011 07:17:57.953105 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.191:8081/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 11 07:17:59 crc kubenswrapper[5055]: I1011 07:17:59.004651 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" path="/var/lib/kubelet/pods/6ff3f14d-ff09-4533-89d6-e53afbda0dd6/volumes" Oct 11 07:17:59 crc kubenswrapper[5055]: I1011 07:17:59.921483 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:59 crc kubenswrapper[5055]: I1011 07:17:59.921832 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:17:59 crc kubenswrapper[5055]: I1011 07:17:59.965296 5055 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:18:00 crc kubenswrapper[5055]: I1011 07:18:00.090299 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.333799 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.334483 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.334852 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.335369 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.335435 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.338411 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.340052 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:00 crc kubenswrapper[5055]: E1011 07:18:00.340123 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot 
Oct 11 07:18:00 crc kubenswrapper[5055]: I1011 07:18:00.962026 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"]
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.070530 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zpjhx" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="registry-server" containerID="cri-o://acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33" gracePeriod=2
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.421821 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.422337 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.544961 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpjhx"
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.704374 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities\") pod \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") "
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.704455 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content\") pod \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") "
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.704563 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4px6k\" (UniqueName: \"kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k\") pod \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\" (UID: \"4fdc6f8c-1c91-45d3-80bc-d07c5953a385\") "
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.706123 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities" (OuterVolumeSpecName: "utilities") pod "4fdc6f8c-1c91-45d3-80bc-d07c5953a385" (UID: "4fdc6f8c-1c91-45d3-80bc-d07c5953a385"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.713694 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k" (OuterVolumeSpecName: "kube-api-access-4px6k") pod "4fdc6f8c-1c91-45d3-80bc-d07c5953a385" (UID: "4fdc6f8c-1c91-45d3-80bc-d07c5953a385"). InnerVolumeSpecName "kube-api-access-4px6k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.718983 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4fdc6f8c-1c91-45d3-80bc-d07c5953a385" (UID: "4fdc6f8c-1c91-45d3-80bc-d07c5953a385"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.806050 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.806083 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 07:18:02 crc kubenswrapper[5055]: I1011 07:18:02.806099 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4px6k\" (UniqueName: \"kubernetes.io/projected/4fdc6f8c-1c91-45d3-80bc-d07c5953a385-kube-api-access-4px6k\") on node \"crc\" DevicePath \"\""
Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.083201 5055 generic.go:334] "Generic (PLEG): container finished" podID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerID="acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33" exitCode=0
Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.083261 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerDied","Data":"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33"}
Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.083305 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zpjhx" event={"ID":"4fdc6f8c-1c91-45d3-80bc-d07c5953a385","Type":"ContainerDied","Data":"bd430b9b7cb485334722137314bd920920e251188a54119a9ef666b1d4dba06d"}
Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.083324 5055 scope.go:117] "RemoveContainer" containerID="acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33"
Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.083512 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpjhx"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zpjhx" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.104240 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"] Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.108404 5055 scope.go:117] "RemoveContainer" containerID="07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.108810 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zpjhx"] Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.159242 5055 scope.go:117] "RemoveContainer" containerID="af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.174527 5055 scope.go:117] "RemoveContainer" containerID="acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33" Oct 11 07:18:03 crc kubenswrapper[5055]: E1011 07:18:03.174897 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33\": container with ID starting with acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33 not found: ID does not exist" containerID="acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.174939 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33"} err="failed to get container status \"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33\": rpc error: code = NotFound desc = could not find container \"acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33\": container with ID starting with acda0b4cd5acec63f77c66c5da7917409e55f01bb53aced3d693846b0f31bd33 not found: ID does not exist" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.174963 5055 scope.go:117] "RemoveContainer" containerID="07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2" Oct 11 07:18:03 crc kubenswrapper[5055]: E1011 07:18:03.175194 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2\": container with ID starting with 07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2 not found: ID does not exist" containerID="07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.175223 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2"} err="failed to get container status \"07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2\": rpc error: code = NotFound desc = could not find container \"07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2\": container with ID starting with 07c511b3e3f69f863f49b552b08aeb22ae5ae648a9afae950120aff03963e1d2 not found: ID does not exist" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.175243 5055 scope.go:117] "RemoveContainer" containerID="af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601" Oct 11 07:18:03 crc kubenswrapper[5055]: E1011 07:18:03.175502 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601\": container with ID starting with af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601 not found: ID does not exist" containerID="af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601" Oct 11 07:18:03 crc kubenswrapper[5055]: I1011 07:18:03.175531 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601"} err="failed to get container status \"af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601\": rpc error: code = NotFound desc = could not find container \"af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601\": container with ID starting with af810127c5fc81fd3e307037286fa19e2ade443da2ed3e97626fa9c0a5ba8601 not found: ID does not exist" Oct 11 07:18:05 crc kubenswrapper[5055]: I1011 07:18:05.004438 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" path="/var/lib/kubelet/pods/4fdc6f8c-1c91-45d3-80bc-d07c5953a385/volumes" Oct 11 07:18:05 crc kubenswrapper[5055]: I1011 07:18:05.030693 5055 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-58d4f6c497-jm75b" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.159:9696/\": dial tcp 10.217.0.159:9696: connect: connection refused" Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.334226 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.334640 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.335186 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.335234 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.335300 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc 
Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.340110 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.341565 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Oct 11 07:18:05 crc kubenswrapper[5055]: E1011 07:18:05.341664 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd"
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.520977 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58d4f6c497-jm75b"
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.662951 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663243 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663276 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4klkm\" (UniqueName: \"kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663329 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663362 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663391 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.663877 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config\") pod \"d9b61d74-f126-4515-ba61-151f8ec0b48c\" (UID: \"d9b61d74-f126-4515-ba61-151f8ec0b48c\") "
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.668081 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.679082 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm" (OuterVolumeSpecName: "kube-api-access-4klkm") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "kube-api-access-4klkm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.698212 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.704240 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.704893 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config" (OuterVolumeSpecName: "config") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.718400 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.721278 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9b61d74-f126-4515-ba61-151f8ec0b48c" (UID: "d9b61d74-f126-4515-ba61-151f8ec0b48c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766234 5055 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-httpd-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766276 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4klkm\" (UniqueName: \"kubernetes.io/projected/d9b61d74-f126-4515-ba61-151f8ec0b48c-kube-api-access-4klkm\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766292 5055 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766304 5055 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766315 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766326 5055 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-config\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:06 crc kubenswrapper[5055]: I1011 07:18:06.766337 5055 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9b61d74-f126-4515-ba61-151f8ec0b48c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.129877 5055 generic.go:334] "Generic (PLEG): container finished" podID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerID="06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265" exitCode=0 Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.129937 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58d4f6c497-jm75b" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.129951 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerDied","Data":"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265"} Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.129987 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58d4f6c497-jm75b" event={"ID":"d9b61d74-f126-4515-ba61-151f8ec0b48c","Type":"ContainerDied","Data":"8a8ce8c7f50736ad08de9413b899006dfaf779888ac23790547c3cce4f8819e5"} Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.130008 5055 scope.go:117] "RemoveContainer" containerID="1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.148113 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.153204 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-58d4f6c497-jm75b"] Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.159880 5055 scope.go:117] "RemoveContainer" containerID="06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.177961 5055 scope.go:117] "RemoveContainer" containerID="1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c" Oct 11 07:18:07 crc kubenswrapper[5055]: E1011 07:18:07.178358 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c\": container with ID starting with 1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c not found: ID does not exist" containerID="1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.178395 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c"} err="failed to get container status \"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c\": rpc error: code = NotFound desc = could not find container \"1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c\": container with ID starting with 1da381ef06212eb84424fa5dc015bebad8d90c047d971d7358425108cd91869c not found: ID does not exist" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.178420 5055 scope.go:117] "RemoveContainer" containerID="06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265" Oct 11 07:18:07 crc kubenswrapper[5055]: E1011 07:18:07.178831 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265\": container with ID starting with 06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265 not found: ID does not exist" containerID="06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265" Oct 11 07:18:07 crc kubenswrapper[5055]: I1011 07:18:07.178859 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265"} err="failed to get container status 
\"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265\": rpc error: code = NotFound desc = could not find container \"06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265\": container with ID starting with 06242da869598afa53d67090239c671f6b5e00bbf9c80c47ff54cbbc1adc7265 not found: ID does not exist" Oct 11 07:18:09 crc kubenswrapper[5055]: I1011 07:18:09.009607 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" path="/var/lib/kubelet/pods/d9b61d74-f126-4515-ba61-151f8ec0b48c/volumes" Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.333320 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.334380 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.334936 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.335417 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.335488 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.335907 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.337435 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:10 crc kubenswrapper[5055]: E1011 07:18:10.337558 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.334043 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.335871 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.335871 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.336959 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.337036 5055 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.337478 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.340519 5055 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" 
cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Oct 11 07:18:15 crc kubenswrapper[5055]: E1011 07:18:15.340669 5055 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-5t8kh" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.102540 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5t8kh_c5b9917d-52e6-4d08-a131-4d1b8b809161/ovs-vswitchd/0.log" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.104586 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231559 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231655 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231718 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231653 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231760 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231680 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib" (OuterVolumeSpecName: "var-lib") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "var-lib". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231870 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.231988 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc7bh\" (UniqueName: \"kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh\") pod \"c5b9917d-52e6-4d08-a131-4d1b8b809161\" (UID: \"c5b9917d-52e6-4d08-a131-4d1b8b809161\") " Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232054 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run" (OuterVolumeSpecName: "var-run") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232102 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log" (OuterVolumeSpecName: "var-log") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232382 5055 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-etc-ovs\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232406 5055 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-lib\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232422 5055 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-run\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232437 5055 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c5b9917d-52e6-4d08-a131-4d1b8b809161-var-log\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.232956 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts" (OuterVolumeSpecName: "scripts") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.236737 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh" (OuterVolumeSpecName: "kube-api-access-cc7bh") pod "c5b9917d-52e6-4d08-a131-4d1b8b809161" (UID: "c5b9917d-52e6-4d08-a131-4d1b8b809161"). InnerVolumeSpecName "kube-api-access-cc7bh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.239705 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-5t8kh_c5b9917d-52e6-4d08-a131-4d1b8b809161/ovs-vswitchd/0.log" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.240562 5055 generic.go:334] "Generic (PLEG): container finished" podID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" exitCode=137 Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.240623 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-5t8kh" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.240622 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerDied","Data":"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18"} Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.240812 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-5t8kh" event={"ID":"c5b9917d-52e6-4d08-a131-4d1b8b809161","Type":"ContainerDied","Data":"044d7c6b9fde12607a6a10179b58395680d371f4aaae03808c6593488274fb6d"} Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.240853 5055 scope.go:117] "RemoveContainer" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.282260 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.286547 5055 scope.go:117] "RemoveContainer" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.287796 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-5t8kh"] Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.306266 5055 scope.go:117] "RemoveContainer" containerID="854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.326728 5055 scope.go:117] "RemoveContainer" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" Oct 11 07:18:17 crc kubenswrapper[5055]: E1011 07:18:17.327350 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18\": container with ID starting with 412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18 not found: ID does not exist" containerID="412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.327469 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18"} err="failed to get container status \"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18\": rpc error: code = NotFound desc = could not find container \"412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18\": container with ID starting with 412d39bd96f705af4569dc17cb50c15fa84986b8a68cf2f85f3080c740820d18 not found: ID does not exist" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.327499 5055 scope.go:117] "RemoveContainer" 
containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" Oct 11 07:18:17 crc kubenswrapper[5055]: E1011 07:18:17.327741 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a\": container with ID starting with 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a not found: ID does not exist" containerID="9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.327788 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a"} err="failed to get container status \"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a\": rpc error: code = NotFound desc = could not find container \"9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a\": container with ID starting with 9a037cf467c18ac20387fa70903455e5e98afadcf5dd720ae0ce0cc38f260a7a not found: ID does not exist" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.327806 5055 scope.go:117] "RemoveContainer" containerID="854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4" Oct 11 07:18:17 crc kubenswrapper[5055]: E1011 07:18:17.328709 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4\": container with ID starting with 854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4 not found: ID does not exist" containerID="854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.328734 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4"} err="failed to get container status \"854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4\": rpc error: code = NotFound desc = could not find container \"854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4\": container with ID starting with 854b6b5e925a5ffd9c7c9e173a7725c96891ecbf00c99076ae697f90314388b4 not found: ID does not exist" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.333503 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c5b9917d-52e6-4d08-a131-4d1b8b809161-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:17 crc kubenswrapper[5055]: I1011 07:18:17.333603 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc7bh\" (UniqueName: \"kubernetes.io/projected/c5b9917d-52e6-4d08-a131-4d1b8b809161-kube-api-access-cc7bh\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.036365 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143001 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143400 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsw6r\" (UniqueName: \"kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143433 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143470 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143537 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143559 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts\") pod \"0bc70ff6-8410-4873-8030-2981e62e73f0\" (UID: \"0bc70ff6-8410-4873-8030-2981e62e73f0\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143659 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.143945 5055 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc70ff6-8410-4873-8030-2981e62e73f0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.148572 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.149414 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts" (OuterVolumeSpecName: "scripts") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.149489 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r" (OuterVolumeSpecName: "kube-api-access-rsw6r") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "kube-api-access-rsw6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.247986 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.248597 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsw6r\" (UniqueName: \"kubernetes.io/projected/0bc70ff6-8410-4873-8030-2981e62e73f0-kube-api-access-rsw6r\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.248622 5055 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-scripts\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.248635 5055 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.258371 5055 generic.go:334] "Generic (PLEG): container finished" podID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerID="24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc" exitCode=137 Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.258466 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.258481 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerDied","Data":"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc"} Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.258556 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc70ff6-8410-4873-8030-2981e62e73f0","Type":"ContainerDied","Data":"209dcfde6ff95c571105b7b1888d52894edf8e04d2e926dc08c6cf9f71baa1ca"} Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.258577 5055 scope.go:117] "RemoveContainer" containerID="1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.267478 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerID="06502cb3633b89b873682e6009f7ef8a153676dfb10f0635ff187c1ee1f7afb6" exitCode=137 Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.267576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"06502cb3633b89b873682e6009f7ef8a153676dfb10f0635ff187c1ee1f7afb6"} Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.268691 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data" (OuterVolumeSpecName: "config-data") pod "0bc70ff6-8410-4873-8030-2981e62e73f0" (UID: "0bc70ff6-8410-4873-8030-2981e62e73f0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.280040 5055 scope.go:117] "RemoveContainer" containerID="24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.298555 5055 scope.go:117] "RemoveContainer" containerID="1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad" Oct 11 07:18:18 crc kubenswrapper[5055]: E1011 07:18:18.299140 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad\": container with ID starting with 1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad not found: ID does not exist" containerID="1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.299172 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad"} err="failed to get container status \"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad\": rpc error: code = NotFound desc = could not find container \"1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad\": container with ID starting with 1b6a0fc66e44e17f3d868c7e8838ad07be52d091a44d8d80be2fde79e614c8ad not found: ID does not exist" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.299200 5055 scope.go:117] "RemoveContainer" containerID="24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc" Oct 11 07:18:18 crc kubenswrapper[5055]: E1011 07:18:18.299485 5055 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc\": container with ID starting with 24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc not found: ID does not exist" containerID="24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.299512 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc"} err="failed to get container status \"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc\": rpc error: code = NotFound desc = could not find container \"24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc\": container with ID starting with 24a4cd53f46b06fb18f2255bef83ecdbf5f9ae979c0de1b64947a5d04a3b01bc not found: ID does not exist" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.350348 5055 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-config-data\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.350387 5055 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc70ff6-8410-4873-8030-2981e62e73f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.371539 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.451276 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"b2f75249-4d9f-44bf-af62-de6757d2326a\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.451326 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktf56\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56\") pod \"b2f75249-4d9f-44bf-af62-de6757d2326a\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.451381 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache\") pod \"b2f75249-4d9f-44bf-af62-de6757d2326a\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.451415 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") pod \"b2f75249-4d9f-44bf-af62-de6757d2326a\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.451464 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock\") pod \"b2f75249-4d9f-44bf-af62-de6757d2326a\" (UID: \"b2f75249-4d9f-44bf-af62-de6757d2326a\") " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.452035 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock" 
(OuterVolumeSpecName: "lock") pod "b2f75249-4d9f-44bf-af62-de6757d2326a" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.452091 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache" (OuterVolumeSpecName: "cache") pod "b2f75249-4d9f-44bf-af62-de6757d2326a" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.452251 5055 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-cache\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.452280 5055 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/b2f75249-4d9f-44bf-af62-de6757d2326a-lock\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.454885 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "swift") pod "b2f75249-4d9f-44bf-af62-de6757d2326a" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.454973 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56" (OuterVolumeSpecName: "kube-api-access-ktf56") pod "b2f75249-4d9f-44bf-af62-de6757d2326a" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a"). InnerVolumeSpecName "kube-api-access-ktf56". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.455361 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b2f75249-4d9f-44bf-af62-de6757d2326a" (UID: "b2f75249-4d9f-44bf-af62-de6757d2326a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.554332 5055 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.554427 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktf56\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-kube-api-access-ktf56\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.554472 5055 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b2f75249-4d9f-44bf-af62-de6757d2326a-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.568360 5055 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.597366 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.603373 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 11 07:18:18 crc kubenswrapper[5055]: I1011 07:18:18.655012 5055 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.012706 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" path="/var/lib/kubelet/pods/0bc70ff6-8410-4873-8030-2981e62e73f0/volumes" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.014228 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" path="/var/lib/kubelet/pods/c5b9917d-52e6-4d08-a131-4d1b8b809161/volumes" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.293521 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"b2f75249-4d9f-44bf-af62-de6757d2326a","Type":"ContainerDied","Data":"c09532594bf617b6a38bf1a26c2b619ece7604f6ac71de6f1d8a39bab11a43fc"} Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.293717 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.293910 5055 scope.go:117] "RemoveContainer" containerID="06502cb3633b89b873682e6009f7ef8a153676dfb10f0635ff187c1ee1f7afb6" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.322862 5055 scope.go:117] "RemoveContainer" containerID="ec154ac9d6d4c663a8d3edda9d0ce209dd40a1836be6f8424d6593198e1668ad" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.327789 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.334833 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.343316 5055 scope.go:117] "RemoveContainer" containerID="f3423b44a9f0fea83f6193fc9009ffe5ed847e103301a15f805c18dd8d170f9e" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.367934 5055 scope.go:117] "RemoveContainer" containerID="ccd2a2a490675a60a807b626b3856bccab72588539a3eb4bd4bf5dcf6d915355" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.395990 5055 scope.go:117] "RemoveContainer" containerID="98a3bd9053451cb0809632f4b67e138c0b75c3d90227f09fcbaeb9c60c6b6bb6" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.416092 5055 scope.go:117] "RemoveContainer" containerID="99e83bf3a8eb2b7367fbf738da227780a1807885d63f0d0e27ddf82536f1f23f" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.433639 5055 scope.go:117] "RemoveContainer" containerID="e8435110db69165389151e0e5ced69984b6208142b9e0d1c97ebf70f6117a07b" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.451048 5055 scope.go:117] "RemoveContainer" containerID="29c5e0db8a9d4f0f2dc6b72ae8af6d9ada7256824853969407ab923a9710b4ca" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.472060 5055 scope.go:117] "RemoveContainer" containerID="52170fd074acd4d0b541fe66faa6dbfd0a1f3e12dbda56b05f7ebf193e4489b4" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.489553 5055 scope.go:117] "RemoveContainer" containerID="07ab4219d9bf5b7769a004dfff97e17520e92f8e30791238e6e136cf29d5a8e7" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.511645 5055 scope.go:117] "RemoveContainer" containerID="e41f04a80d59653f303efa7f5149b061d767b0550c8a0d24bd1f7a2ffb40a7a7" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.531531 5055 scope.go:117] "RemoveContainer" containerID="b49bbf96e1ad1276ddfb0167a4e342e71a2d3248b8dd30e171b623fe16b3e5b3" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.556389 5055 scope.go:117] "RemoveContainer" containerID="169a3663bf7e265b73d12b362a875432f15995d044bb153d925d7a1f2aded521" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.573069 5055 scope.go:117] "RemoveContainer" containerID="56af820548674816735daf22fa48f4a5a08266e6d8ffee8ee41bf2dc35f87bc1" Oct 11 07:18:19 crc kubenswrapper[5055]: I1011 07:18:19.590230 5055 scope.go:117] "RemoveContainer" containerID="c874955aa943101ec45b5190a57d5bc2728a209a837dabcf4f0dd080c233522f" Oct 11 07:18:21 crc kubenswrapper[5055]: I1011 07:18:21.011410 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" path="/var/lib/kubelet/pods/b2f75249-4d9f-44bf-af62-de6757d2326a/volumes" Oct 11 07:18:27 crc kubenswrapper[5055]: I1011 07:18:27.035328 5055 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod6ff3f14d-ff09-4533-89d6-e53afbda0dd6"] err="unable to destroy cgroup paths for 
cgroup [kubepods besteffort pod6ff3f14d-ff09-4533-89d6-e53afbda0dd6] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6ff3f14d_ff09_4533_89d6_e53afbda0dd6.slice" Oct 11 07:18:32 crc kubenswrapper[5055]: I1011 07:18:32.421910 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:18:32 crc kubenswrapper[5055]: I1011 07:18:32.422680 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:18:32 crc kubenswrapper[5055]: I1011 07:18:32.422802 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:18:32 crc kubenswrapper[5055]: I1011 07:18:32.423730 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:18:32 crc kubenswrapper[5055]: I1011 07:18:32.423872 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" gracePeriod=600 Oct 11 07:18:32 crc kubenswrapper[5055]: E1011 07:18:32.554438 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:18:33 crc kubenswrapper[5055]: I1011 07:18:33.435879 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" exitCode=0 Oct 11 07:18:33 crc kubenswrapper[5055]: I1011 07:18:33.435973 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117"} Oct 11 07:18:33 crc kubenswrapper[5055]: I1011 07:18:33.436351 5055 scope.go:117] "RemoveContainer" containerID="28fb3395db8b46734147bf5f696c45faf64d183a1700bf2cea197e5b447049b3" Oct 11 07:18:33 crc kubenswrapper[5055]: I1011 07:18:33.437139 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:18:33 crc kubenswrapper[5055]: E1011 07:18:33.437593 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:18:47 crc kubenswrapper[5055]: I1011 07:18:47.002023 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:18:47 crc kubenswrapper[5055]: E1011 07:18:47.003207 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:00 crc kubenswrapper[5055]: I1011 07:19:00.993854 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:19:00 crc kubenswrapper[5055]: E1011 07:19:00.995053 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:12 crc kubenswrapper[5055]: I1011 07:19:12.994126 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:19:12 crc kubenswrapper[5055]: E1011 07:19:12.995743 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:24 crc kubenswrapper[5055]: I1011 07:19:24.993057 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:19:24 crc kubenswrapper[5055]: E1011 07:19:24.995004 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:37 crc kubenswrapper[5055]: I1011 07:19:37.994081 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:19:37 crc kubenswrapper[5055]: E1011 07:19:37.995512 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:49 crc kubenswrapper[5055]: I1011 07:19:49.994579 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:19:49 crc kubenswrapper[5055]: E1011 07:19:49.996609 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.398699 5055 scope.go:117] "RemoveContainer" containerID="5f669ea0d81bde995efd9316f208e7c11cc6d09e1f2f5b5b60e815374539179a" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.421602 5055 scope.go:117] "RemoveContainer" containerID="10d71784edd3f6ec2fbce87c08a6c3ea956ccb7e0b266974fc673f0bc6f0afd2" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.466854 5055 scope.go:117] "RemoveContainer" containerID="12e5b5a9e7355897c5a5e7b56f13c98d9c7c5a690e2053ad0ed2cfdfabeddf21" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.488782 5055 scope.go:117] "RemoveContainer" containerID="4dee5f6fda69f3982630460428d43cda49bcbb6b70d07bed8e892c763c82310f" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.514713 5055 scope.go:117] "RemoveContainer" containerID="413da539315ac7f5d02d327de99cd69f5a9c19ea438f0509a84e853069c698fa" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.547358 5055 scope.go:117] "RemoveContainer" containerID="525efc8a319ad46dfee6df76645ecba02f42e0c169294d33d2d7a6d571e09063" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.580815 5055 scope.go:117] "RemoveContainer" containerID="a2009069b1b6468e98ce6fd276203569c2cc14f3b2151d6fb53c24bf78249492" Oct 11 07:19:51 crc kubenswrapper[5055]: I1011 07:19:51.596367 5055 scope.go:117] "RemoveContainer" containerID="db55481d25e075d5a8749f7cb0165db216a25a2edbe0f1d4b9d68ef4315be0ed" Oct 11 07:20:02 crc kubenswrapper[5055]: I1011 07:20:02.994366 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:20:02 crc kubenswrapper[5055]: E1011 07:20:02.995158 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:20:14 crc kubenswrapper[5055]: I1011 07:20:14.994238 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:20:14 crc kubenswrapper[5055]: E1011 07:20:14.995210 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:20:25 crc kubenswrapper[5055]: I1011 07:20:25.993755 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:20:25 crc kubenswrapper[5055]: E1011 07:20:25.995257 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:20:40 crc kubenswrapper[5055]: I1011 07:20:40.993875 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:20:40 crc kubenswrapper[5055]: E1011 07:20:40.995003 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.712414 5055 scope.go:117] "RemoveContainer" containerID="6451ddf3c93d11d13c19726cabdb7eab83c68b055d4ea78ef5eba294d82d05d6" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.744152 5055 scope.go:117] "RemoveContainer" containerID="d5826852f5a7ec193e383ce9c7b40ef54e48636bd04ea02fbdc3e35d19b254ef" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.782726 5055 scope.go:117] "RemoveContainer" containerID="fe652d39e7e6e3d258f7f666f03e71db8cb9fbfd39a1541489d6565528bf31fc" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.808790 5055 scope.go:117] "RemoveContainer" containerID="c875a88f3743d3993dd9a3e16831b0a6d030be3ef2be3e9f3ed67168c54bcd11" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.850593 5055 scope.go:117] "RemoveContainer" containerID="5acf172225cdf40a6f216adcb336b961ebecebc35c031e3ef6b0bdf717685fd2" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.874321 5055 scope.go:117] "RemoveContainer" containerID="d09c619816ad3608f0b8742bcf9e0a74adc342bdff3f2bda266f3f8ea0d63c19" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.916628 5055 scope.go:117] "RemoveContainer" containerID="c1d684263aaf87efbcea506b560a28b1b946789bf372a768d36acec5fea4d022" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.936153 5055 scope.go:117] "RemoveContainer" containerID="671b6200e8eab05242809aab3fde8beb4a2d22eb5c3a00ba72673a18bd57b820" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.955697 5055 scope.go:117] "RemoveContainer" containerID="1a9dd45ee3e83283bc4c67687d9a75523999a033d2bab86f134ae42061e8bef0" Oct 11 07:20:51 crc kubenswrapper[5055]: I1011 07:20:51.978327 5055 scope.go:117] "RemoveContainer" containerID="47ded605de4372a0e91cff570a44e5fbe01c76b283e1b8e5907eeaa5d52389bc" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.009742 5055 scope.go:117] "RemoveContainer" containerID="bd47c1e5c762bf3369cf0794a7bd76c76e644f7f839b5ceb228042a88aed775e" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.025153 5055 scope.go:117] "RemoveContainer" 
containerID="81e8ee154acdddce8902a40492fdd451667f1d4ec00f3aebf1bba9058dae907a" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.054151 5055 scope.go:117] "RemoveContainer" containerID="1dd6a3d80213a8d25175233d04091817c1abddfa820fae04faffb4f6ff9eb010" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.070504 5055 scope.go:117] "RemoveContainer" containerID="a0a0abf70d2c007bb1c49c14a15f4843ffb20deb9122e77b28327e5e2a26d9c6" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.100739 5055 scope.go:117] "RemoveContainer" containerID="003684d134598af05a17bfa081c9fbb69ccc7d05883cb34ca560f38b2dc5eec6" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.116039 5055 scope.go:117] "RemoveContainer" containerID="bc476925d6ccbab654212a5e1a766d13c073fe400387165deb5be1868fea364f" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.131003 5055 scope.go:117] "RemoveContainer" containerID="4d0f46837749fa8e6cb46bd60b4aed62acdaa53ca62154c632c0c613c678b675" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.147820 5055 scope.go:117] "RemoveContainer" containerID="cb16ae1b9b332802488fc88c04afb8c3ed39071dd545eb4b99255d83d4e3c16a" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.164985 5055 scope.go:117] "RemoveContainer" containerID="2bbd19f08b9f56aadd0332921f0d27aafe6bbf0e9122095351a0b2e167b99652" Oct 11 07:20:52 crc kubenswrapper[5055]: I1011 07:20:52.190083 5055 scope.go:117] "RemoveContainer" containerID="0d961448058531e90b1f8b9d6580cc50c771d581b5d95b0d8f2314976f162c41" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.276989 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277785 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.277806 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277821 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.277832 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277845 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.277892 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277917 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.277928 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277945 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 
07:20:55.277955 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.277979 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.277989 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278001 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278011 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278026 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="mysql-bootstrap" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278038 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="mysql-bootstrap" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278061 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278072 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278094 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="cinder-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278107 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="cinder-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278123 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278134 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278151 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerName="nova-scheduler-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278161 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerName="nova-scheduler-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278174 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="extract-utilities" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278184 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="extract-utilities" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278205 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-log" Oct 11 
07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278215 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278236 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278261 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278276 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerName="kube-state-metrics" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278288 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerName="kube-state-metrics" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278304 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278314 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278326 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="probe" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278337 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="probe" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278351 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99e172e-068d-423a-b797-2467e352a41b" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278362 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99e172e-068d-423a-b797-2467e352a41b" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278380 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278390 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278403 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5108504f-f2dd-4f43-8d7a-f630e055d661" containerName="keystone-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278412 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5108504f-f2dd-4f43-8d7a-f630e055d661" containerName="keystone-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278425 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278435 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278447 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 
07:20:55.278457 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278472 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278482 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278501 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278513 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278525 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="registry-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278535 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="registry-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278550 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278560 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278573 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01e71e0c-56f0-4bfc-bd58-501785a5d235" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278583 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="01e71e0c-56f0-4bfc-bd58-501785a5d235" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278593 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278603 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278619 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278629 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278643 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278653 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278666 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" 
containerName="container-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278676 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278687 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278696 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278715 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6baacc00-a270-4662-ba67-aad18287df2c" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278724 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6baacc00-a270-4662-ba67-aad18287df2c" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278737 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278749 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278791 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278804 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278819 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278830 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278849 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="swift-recon-cron" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278860 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="swift-recon-cron" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278877 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278887 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278907 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278917 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278936 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6baacc00-a270-4662-ba67-aad18287df2c" 
containerName="setup-container" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278945 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6baacc00-a270-4662-ba67-aad18287df2c" containerName="setup-container" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.278964 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="rsync" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.278975 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="rsync" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279004 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="mysql-bootstrap" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279015 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="mysql-bootstrap" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279027 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279038 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279056 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279065 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279082 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f360ed99-6efd-4281-a18b-7597df5341b6" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279092 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f360ed99-6efd-4281-a18b-7597df5341b6" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279115 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="sg-core" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279125 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="sg-core" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279141 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server-init" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279152 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server-init" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279171 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="openstack-network-exporter" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279196 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="openstack-network-exporter" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279212 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279222 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279237 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279247 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279261 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-reaper" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279271 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-reaper" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279285 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="extract-content" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279293 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="extract-content" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279312 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279322 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-server" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279338 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eb6d469-724e-4dc7-943c-60454db39c60" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279350 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9eb6d469-724e-4dc7-943c-60454db39c60" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279361 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-notification-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279371 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-notification-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279384 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279394 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279409 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279420 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 
07:20:55.279437 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279447 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279464 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279474 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279490 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-central-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279501 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-central-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279515 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279524 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279539 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-expirer" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279549 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-expirer" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279567 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerName="nova-cell1-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279577 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerName="nova-cell1-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279589 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bebbd2e-a315-493d-820b-69e8dc749ee1" containerName="memcached" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279598 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bebbd2e-a315-493d-820b-69e8dc749ee1" containerName="memcached" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279615 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="setup-container" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279625 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="setup-container" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279642 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279653 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" 
containerName="neutron-api" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279663 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279676 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279688 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c647ccff-95d1-467a-9fcd-4b5110772361" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279699 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c647ccff-95d1-467a-9fcd-4b5110772361" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279716 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279728 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279745 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279756 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279797 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf7edb2-ebde-4783-b428-b6ce809be9f9" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279809 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf7edb2-ebde-4783-b428-b6ce809be9f9" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.279823 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerName="nova-cell0-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.279834 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerName="nova-cell0-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280064 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280089 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280099 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="af2ab03e-3011-4d6c-bbd5-ac5453d7785d" containerName="kube-state-metrics" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280114 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="sg-core" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280133 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280151 5055 
memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280162 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280181 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-expirer" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280195 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="01e71e0c-56f0-4bfc-bd58-501785a5d235" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280214 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280229 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280242 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fdc6f8c-1c91-45d3-80bc-d07c5953a385" containerName="registry-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280260 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="056ae819-c243-4aa7-8214-1432f198dcab" containerName="nova-metadata-metadata" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280277 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280291 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="ovn-northd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280301 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280314 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf7edb2-ebde-4783-b428-b6ce809be9f9" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280326 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280338 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bebbd2e-a315-493d-820b-69e8dc749ee1" containerName="memcached" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280353 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-updater" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280367 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280384 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280395 5055 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="71344487-c426-47fe-85cb-927c97465a5b" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280407 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ff3f14d-ff09-4533-89d6-e53afbda0dd6" containerName="nova-cell0-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280425 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovsdb-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280436 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280455 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd3f46f-87e3-42e7-a37d-e746fb601f60" containerName="barbican-worker" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280465 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280481 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="swift-recon-cron" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280494 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="cinder-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280504 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="84fd6e7a-8996-4bb8-932f-a0a22a7a0cd7" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280515 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="86907505-2c19-41ed-b72d-0b8bbeab1eec" containerName="openstack-network-exporter" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280532 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="324f6d50-368a-4f8c-8cee-4a9ff543ba31" containerName="barbican-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280546 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb4ddf0-3311-4e4a-bb7c-3e77b7ea3241" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280556 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="80597a79-e3fd-41cd-b035-a35494775fcb" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280567 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280584 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280596 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="451f7ab0-0020-4931-a42f-f56eb5b28201" containerName="nova-api-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280609 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-log" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280647 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7e77464-d0e5-4a9b-97a1-557cac810654" containerName="placement-api" Oct 11 07:20:55 
crc kubenswrapper[5055]: I1011 07:20:55.280662 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="01de7a83-4a08-494a-a3b9-774e6787e30f" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280676 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280693 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="50ae0390-2ce4-47fe-88af-831aee265a6c" containerName="glance-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280708 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-replicator" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280719 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eb6d469-724e-4dc7-943c-60454db39c60" containerName="nova-cell1-novncproxy-novncproxy" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280729 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c647ccff-95d1-467a-9fcd-4b5110772361" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280759 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="rsync" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280800 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cf968bd-0ddb-4647-8c76-8038cb19d053" containerName="barbican-keystone-listener" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280813 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9b61d74-f126-4515-ba61-151f8ec0b48c" containerName="neutron-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280826 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc70ff6-8410-4873-8030-2981e62e73f0" containerName="probe" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280844 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8950328a-3275-4ba9-8bd8-ea4940f2eaac" containerName="cinder-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280855 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5108504f-f2dd-4f43-8d7a-f630e055d661" containerName="keystone-api" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280874 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f76ee4c-bc76-492a-898d-def8ec69e291" containerName="proxy-httpd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280887 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a52d356-2098-4a61-be67-5519a2d2b78f" containerName="nova-cell1-conductor-conductor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280902 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2e0c881-0205-4317-ba03-cfb76f0f69e6" containerName="nova-scheduler-scheduler" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280917 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f360ed99-6efd-4281-a18b-7597df5341b6" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280927 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-central-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280944 5055 
memory_manager.go:354] "RemoveStaleState removing state" podUID="6baacc00-a270-4662-ba67-aad18287df2c" containerName="rabbitmq" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280956 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="object-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.280973 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-reaper" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281018 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6c56af9-552f-4b5a-9648-f28af1328534" containerName="galera" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281031 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b9917d-52e6-4d08-a131-4d1b8b809161" containerName="ovs-vswitchd" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281042 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cc22641-a524-415a-8377-580664ed5d90" containerName="ceilometer-notification-agent" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281060 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="account-auditor" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281078 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f75249-4d9f-44bf-af62-de6757d2326a" containerName="container-server" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.281095 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f99e172e-068d-423a-b797-2467e352a41b" containerName="mariadb-account-delete" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.282689 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.292109 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.364787 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gkds\" (UniqueName: \"kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.364905 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.365001 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.466430 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.466523 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.466545 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gkds\" (UniqueName: \"kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.467002 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.467060 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.485177 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7gkds\" (UniqueName: \"kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds\") pod \"redhat-operators-s2gct\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.604451 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.870156 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.872202 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.879650 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.971897 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtjvv\" (UniqueName: \"kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.971978 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.972102 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:55 crc kubenswrapper[5055]: I1011 07:20:55.993597 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:20:55 crc kubenswrapper[5055]: E1011 07:20:55.993839 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.017945 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.073883 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.073982 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtjvv\" (UniqueName: \"kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.074072 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.074693 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.074787 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.093622 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtjvv\" (UniqueName: \"kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv\") pod \"community-operators-q9t54\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.199952 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.414724 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.660971 5055 generic.go:334] "Generic (PLEG): container finished" podID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerID="8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9" exitCode=0 Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.661165 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerDied","Data":"8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9"} Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.661348 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerStarted","Data":"6e88439a7403bd69d7d4aaa307e1a94efd0f6c0925dcb92b88c3f1a921b78e3e"} Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.662839 5055 generic.go:334] "Generic (PLEG): container finished" podID="8859c7aa-75b4-404d-9427-55c33383cf64" containerID="5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88" exitCode=0 Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.662886 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerDied","Data":"5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88"} Oct 11 07:20:56 crc kubenswrapper[5055]: I1011 07:20:56.662913 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerStarted","Data":"6e8fac9d1d03cc80a182162e210ee159d52b22e580e441dbed09ad241efd71e4"} Oct 11 07:20:57 crc kubenswrapper[5055]: I1011 07:20:57.671651 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerStarted","Data":"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3"} Oct 11 07:20:57 crc kubenswrapper[5055]: I1011 07:20:57.674031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerStarted","Data":"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1"} Oct 11 07:20:58 crc kubenswrapper[5055]: I1011 07:20:58.683352 5055 generic.go:334] "Generic (PLEG): container finished" podID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerID="5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3" exitCode=0 Oct 11 07:20:58 crc kubenswrapper[5055]: I1011 07:20:58.683465 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerDied","Data":"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3"} Oct 11 07:20:58 crc kubenswrapper[5055]: I1011 07:20:58.685645 5055 generic.go:334] "Generic (PLEG): container finished" podID="8859c7aa-75b4-404d-9427-55c33383cf64" containerID="b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1" exitCode=0 Oct 11 07:20:58 crc 
kubenswrapper[5055]: I1011 07:20:58.685682 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerDied","Data":"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1"} Oct 11 07:20:59 crc kubenswrapper[5055]: I1011 07:20:59.694756 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerStarted","Data":"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651"} Oct 11 07:20:59 crc kubenswrapper[5055]: I1011 07:20:59.697388 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerStarted","Data":"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232"} Oct 11 07:20:59 crc kubenswrapper[5055]: I1011 07:20:59.711895 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s2gct" podStartSLOduration=1.954292886 podStartE2EDuration="4.711877031s" podCreationTimestamp="2025-10-11 07:20:55 +0000 UTC" firstStartedPulling="2025-10-11 07:20:56.662354585 +0000 UTC m=+1640.436628392" lastFinishedPulling="2025-10-11 07:20:59.41993872 +0000 UTC m=+1643.194212537" observedRunningTime="2025-10-11 07:20:59.709596234 +0000 UTC m=+1643.483870051" watchObservedRunningTime="2025-10-11 07:20:59.711877031 +0000 UTC m=+1643.486150838" Oct 11 07:20:59 crc kubenswrapper[5055]: I1011 07:20:59.733005 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q9t54" podStartSLOduration=2.345741191 podStartE2EDuration="4.732988747s" podCreationTimestamp="2025-10-11 07:20:55 +0000 UTC" firstStartedPulling="2025-10-11 07:20:56.664121277 +0000 UTC m=+1640.438395084" lastFinishedPulling="2025-10-11 07:20:59.051368833 +0000 UTC m=+1642.825642640" observedRunningTime="2025-10-11 07:20:59.730728871 +0000 UTC m=+1643.505002678" watchObservedRunningTime="2025-10-11 07:20:59.732988747 +0000 UTC m=+1643.507262554" Oct 11 07:21:05 crc kubenswrapper[5055]: I1011 07:21:05.605704 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:05 crc kubenswrapper[5055]: I1011 07:21:05.606802 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:05 crc kubenswrapper[5055]: I1011 07:21:05.648991 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:05 crc kubenswrapper[5055]: I1011 07:21:05.807774 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:05 crc kubenswrapper[5055]: I1011 07:21:05.890298 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:21:06 crc kubenswrapper[5055]: I1011 07:21:06.200926 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:06 crc kubenswrapper[5055]: I1011 07:21:06.201257 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:06 crc kubenswrapper[5055]: 
I1011 07:21:06.244831 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:06 crc kubenswrapper[5055]: I1011 07:21:06.787130 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:06 crc kubenswrapper[5055]: I1011 07:21:06.999124 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:21:06 crc kubenswrapper[5055]: E1011 07:21:06.999535 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:21:07 crc kubenswrapper[5055]: I1011 07:21:07.755375 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s2gct" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="registry-server" containerID="cri-o://4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651" gracePeriod=2 Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.129161 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.240155 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gkds\" (UniqueName: \"kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds\") pod \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.240255 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content\") pod \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.240306 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities\") pod \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\" (UID: \"7720ba37-80aa-4e0e-bc26-d663831cd2ca\") " Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.241325 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities" (OuterVolumeSpecName: "utilities") pod "7720ba37-80aa-4e0e-bc26-d663831cd2ca" (UID: "7720ba37-80aa-4e0e-bc26-d663831cd2ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.245226 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds" (OuterVolumeSpecName: "kube-api-access-7gkds") pod "7720ba37-80aa-4e0e-bc26-d663831cd2ca" (UID: "7720ba37-80aa-4e0e-bc26-d663831cd2ca"). InnerVolumeSpecName "kube-api-access-7gkds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.289397 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.342133 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.342160 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gkds\" (UniqueName: \"kubernetes.io/projected/7720ba37-80aa-4e0e-bc26-d663831cd2ca-kube-api-access-7gkds\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764146 5055 generic.go:334] "Generic (PLEG): container finished" podID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerID="4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651" exitCode=0 Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764198 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerDied","Data":"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651"} Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764234 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2gct" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764256 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2gct" event={"ID":"7720ba37-80aa-4e0e-bc26-d663831cd2ca","Type":"ContainerDied","Data":"6e88439a7403bd69d7d4aaa307e1a94efd0f6c0925dcb92b88c3f1a921b78e3e"} Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764281 5055 scope.go:117] "RemoveContainer" containerID="4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.764801 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q9t54" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="registry-server" containerID="cri-o://c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232" gracePeriod=2 Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.789901 5055 scope.go:117] "RemoveContainer" containerID="5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.813247 5055 scope.go:117] "RemoveContainer" containerID="8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.839005 5055 scope.go:117] "RemoveContainer" containerID="4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651" Oct 11 07:21:08 crc kubenswrapper[5055]: E1011 07:21:08.839492 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651\": container with ID starting with 4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651 not found: ID does not exist" containerID="4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.839531 5055 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651"} err="failed to get container status \"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651\": rpc error: code = NotFound desc = could not find container \"4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651\": container with ID starting with 4f477f978a0ffed2ffaef9b9bf1a6104eedc7d36c1db0447cc8ff22fd7809651 not found: ID does not exist" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.839560 5055 scope.go:117] "RemoveContainer" containerID="5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3" Oct 11 07:21:08 crc kubenswrapper[5055]: E1011 07:21:08.841026 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3\": container with ID starting with 5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3 not found: ID does not exist" containerID="5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.841068 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3"} err="failed to get container status \"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3\": rpc error: code = NotFound desc = could not find container \"5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3\": container with ID starting with 5055c7c1ce1ec0b33c47ecf3956cd7287479b925b04e017f7700b99e2ff03bc3 not found: ID does not exist" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.841085 5055 scope.go:117] "RemoveContainer" containerID="8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9" Oct 11 07:21:08 crc kubenswrapper[5055]: E1011 07:21:08.841314 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9\": container with ID starting with 8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9 not found: ID does not exist" containerID="8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9" Oct 11 07:21:08 crc kubenswrapper[5055]: I1011 07:21:08.841344 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9"} err="failed to get container status \"8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9\": rpc error: code = NotFound desc = could not find container \"8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9\": container with ID starting with 8b55717f69d835eefd59bd8ee9c98aa94db674ce2ef6c9119e284155541f07b9 not found: ID does not exist" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.402710 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7720ba37-80aa-4e0e-bc26-d663831cd2ca" (UID: "7720ba37-80aa-4e0e-bc26-d663831cd2ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.460032 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7720ba37-80aa-4e0e-bc26-d663831cd2ca-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.642570 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.696507 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.702684 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s2gct"] Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.763309 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content\") pod \"8859c7aa-75b4-404d-9427-55c33383cf64\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.763355 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities\") pod \"8859c7aa-75b4-404d-9427-55c33383cf64\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.763401 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtjvv\" (UniqueName: \"kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv\") pod \"8859c7aa-75b4-404d-9427-55c33383cf64\" (UID: \"8859c7aa-75b4-404d-9427-55c33383cf64\") " Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.764102 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities" (OuterVolumeSpecName: "utilities") pod "8859c7aa-75b4-404d-9427-55c33383cf64" (UID: "8859c7aa-75b4-404d-9427-55c33383cf64"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.770962 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv" (OuterVolumeSpecName: "kube-api-access-jtjvv") pod "8859c7aa-75b4-404d-9427-55c33383cf64" (UID: "8859c7aa-75b4-404d-9427-55c33383cf64"). InnerVolumeSpecName "kube-api-access-jtjvv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.774364 5055 generic.go:334] "Generic (PLEG): container finished" podID="8859c7aa-75b4-404d-9427-55c33383cf64" containerID="c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232" exitCode=0 Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.774398 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerDied","Data":"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232"} Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.774429 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9t54" event={"ID":"8859c7aa-75b4-404d-9427-55c33383cf64","Type":"ContainerDied","Data":"6e8fac9d1d03cc80a182162e210ee159d52b22e580e441dbed09ad241efd71e4"} Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.774438 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9t54" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.774447 5055 scope.go:117] "RemoveContainer" containerID="c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.804071 5055 scope.go:117] "RemoveContainer" containerID="b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.816349 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8859c7aa-75b4-404d-9427-55c33383cf64" (UID: "8859c7aa-75b4-404d-9427-55c33383cf64"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.826843 5055 scope.go:117] "RemoveContainer" containerID="5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.846270 5055 scope.go:117] "RemoveContainer" containerID="c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232" Oct 11 07:21:09 crc kubenswrapper[5055]: E1011 07:21:09.854154 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232\": container with ID starting with c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232 not found: ID does not exist" containerID="c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.854285 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232"} err="failed to get container status \"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232\": rpc error: code = NotFound desc = could not find container \"c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232\": container with ID starting with c540bf5147358349d120ed9f014605dc6ff80bb37f2a6cdfdf8279fd1620d232 not found: ID does not exist" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.854327 5055 scope.go:117] "RemoveContainer" containerID="b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1" Oct 11 07:21:09 crc kubenswrapper[5055]: E1011 07:21:09.855163 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1\": container with ID starting with b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1 not found: ID does not exist" containerID="b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.855243 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1"} err="failed to get container status \"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1\": rpc error: code = NotFound desc = could not find container \"b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1\": container with ID starting with b58364aa96de69ba3280cfae05c8f273d924de0b791a31b47fb09ab039357db1 not found: ID does not exist" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.855275 5055 scope.go:117] "RemoveContainer" containerID="5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88" Oct 11 07:21:09 crc kubenswrapper[5055]: E1011 07:21:09.855723 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88\": container with ID starting with 5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88 not found: ID does not exist" containerID="5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.855815 5055 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88"} err="failed to get container status \"5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88\": rpc error: code = NotFound desc = could not find container \"5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88\": container with ID starting with 5c23b83e4686931ace4114540757d1810bb754a7930bd291b7be995d25df0d88 not found: ID does not exist" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.865161 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.865193 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8859c7aa-75b4-404d-9427-55c33383cf64-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:09 crc kubenswrapper[5055]: I1011 07:21:09.865205 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtjvv\" (UniqueName: \"kubernetes.io/projected/8859c7aa-75b4-404d-9427-55c33383cf64-kube-api-access-jtjvv\") on node \"crc\" DevicePath \"\"" Oct 11 07:21:10 crc kubenswrapper[5055]: I1011 07:21:10.100293 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:21:10 crc kubenswrapper[5055]: I1011 07:21:10.109105 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q9t54"] Oct 11 07:21:11 crc kubenswrapper[5055]: I1011 07:21:11.002293 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" path="/var/lib/kubelet/pods/7720ba37-80aa-4e0e-bc26-d663831cd2ca/volumes" Oct 11 07:21:11 crc kubenswrapper[5055]: I1011 07:21:11.003374 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" path="/var/lib/kubelet/pods/8859c7aa-75b4-404d-9427-55c33383cf64/volumes" Oct 11 07:21:20 crc kubenswrapper[5055]: I1011 07:21:20.993464 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:21:20 crc kubenswrapper[5055]: E1011 07:21:20.994399 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:21:33 crc kubenswrapper[5055]: I1011 07:21:33.993788 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:21:33 crc kubenswrapper[5055]: E1011 07:21:33.994655 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:21:46 crc kubenswrapper[5055]: I1011 07:21:46.997056 5055 scope.go:117] 
"RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:21:46 crc kubenswrapper[5055]: E1011 07:21:46.998483 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.422642 5055 scope.go:117] "RemoveContainer" containerID="84af3482a1b5087b3f9fce032a75fe9ea67e0df26b99966077869dc24981e2fa" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.483664 5055 scope.go:117] "RemoveContainer" containerID="940d3d968c907a34fee8725aa2422f658c7cb351eaa5986851072ce1d5bddcde" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.502039 5055 scope.go:117] "RemoveContainer" containerID="ce6c4a40a3a9d2f8beefe4abf12d5b0968a77827509a363909f19a741d4bd9d0" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.529171 5055 scope.go:117] "RemoveContainer" containerID="608490c678382fe22f1f2f0a325b741a076db7f6aa12df1a4bfcf555d6b2291b" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.559006 5055 scope.go:117] "RemoveContainer" containerID="e6301e1b39c4a53eec0da2358fd5cf00a686fed2690b06e72389b91e5a219d99" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.586050 5055 scope.go:117] "RemoveContainer" containerID="7065c574492a21e6fdcf4a81ea128b10e7f9f4f28e6b62ea66aa04c7dd1957bb" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.611401 5055 scope.go:117] "RemoveContainer" containerID="f6898264fb5556b3944d3a8689d11d1d7edbf1e2e1599308646deff27daa9023" Oct 11 07:21:52 crc kubenswrapper[5055]: I1011 07:21:52.629412 5055 scope.go:117] "RemoveContainer" containerID="a36e56b7fca8b4f65e5fb319758ecc354a7e91d67c137d241820472741a23fe5" Oct 11 07:22:00 crc kubenswrapper[5055]: I1011 07:22:00.993564 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:22:00 crc kubenswrapper[5055]: E1011 07:22:00.994436 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:22:12 crc kubenswrapper[5055]: I1011 07:22:12.994166 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:22:12 crc kubenswrapper[5055]: E1011 07:22:12.995336 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:22:24 crc kubenswrapper[5055]: I1011 07:22:24.994250 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 
07:22:24 crc kubenswrapper[5055]: E1011 07:22:24.995455 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:22:35 crc kubenswrapper[5055]: I1011 07:22:35.994552 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:22:35 crc kubenswrapper[5055]: E1011 07:22:35.995609 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:22:49 crc kubenswrapper[5055]: I1011 07:22:49.992949 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:22:49 crc kubenswrapper[5055]: E1011 07:22:49.993692 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:22:52 crc kubenswrapper[5055]: I1011 07:22:52.786093 5055 scope.go:117] "RemoveContainer" containerID="59f1f8256c40f1b0fc187bbfdda406a2648bc79c5276cfc25ef7d0febc6bb0f4" Oct 11 07:22:52 crc kubenswrapper[5055]: I1011 07:22:52.828730 5055 scope.go:117] "RemoveContainer" containerID="e924a4a0bf51b244d095f694add529a8eefd25c7b9d6ffbe0297fb57e1f80c20" Oct 11 07:23:01 crc kubenswrapper[5055]: I1011 07:23:01.993439 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:23:01 crc kubenswrapper[5055]: E1011 07:23:01.994290 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:23:13 crc kubenswrapper[5055]: I1011 07:23:13.994111 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:23:13 crc kubenswrapper[5055]: E1011 07:23:13.995159 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:23:27 crc 
kubenswrapper[5055]: I1011 07:23:27.993428 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:23:27 crc kubenswrapper[5055]: E1011 07:23:27.994139 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:23:42 crc kubenswrapper[5055]: I1011 07:23:42.996546 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117" Oct 11 07:23:44 crc kubenswrapper[5055]: I1011 07:23:44.041440 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68"} Oct 11 07:23:52 crc kubenswrapper[5055]: I1011 07:23:52.894382 5055 scope.go:117] "RemoveContainer" containerID="00c617aa4b929d980bf4005136dd8cb35aa22f6579b8d52e5d42af62b008cfc3" Oct 11 07:23:52 crc kubenswrapper[5055]: I1011 07:23:52.913754 5055 scope.go:117] "RemoveContainer" containerID="8d98edc9568b84b014fa7d41ec6840140f135da6ee307da88b52c367995dc9a7" Oct 11 07:23:52 crc kubenswrapper[5055]: I1011 07:23:52.966396 5055 scope.go:117] "RemoveContainer" containerID="9005a9d40210899cb8c06dcf5d2cc8762eda1239eaeb43568f31ef7af56a71cc" Oct 11 07:26:02 crc kubenswrapper[5055]: I1011 07:26:02.421643 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:26:02 crc kubenswrapper[5055]: I1011 07:26:02.422205 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:26:32 crc kubenswrapper[5055]: I1011 07:26:32.421747 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:26:32 crc kubenswrapper[5055]: I1011 07:26:32.422403 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:27:02 crc kubenswrapper[5055]: I1011 07:27:02.422640 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= 
Oct 11 07:27:02 crc kubenswrapper[5055]: I1011 07:27:02.423119 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 07:27:02 crc kubenswrapper[5055]: I1011 07:27:02.423162 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf"
Oct 11 07:27:02 crc kubenswrapper[5055]: I1011 07:27:02.423757 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 11 07:27:02 crc kubenswrapper[5055]: I1011 07:27:02.423843 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68" gracePeriod=600
Oct 11 07:27:03 crc kubenswrapper[5055]: I1011 07:27:03.555863 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68" exitCode=0
Oct 11 07:27:03 crc kubenswrapper[5055]: I1011 07:27:03.555913 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68"}
Oct 11 07:27:03 crc kubenswrapper[5055]: I1011 07:27:03.556896 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590"}
Oct 11 07:27:03 crc kubenswrapper[5055]: I1011 07:27:03.556926 5055 scope.go:117] "RemoveContainer" containerID="a619bd1368642885bdc36b064bd19733dfb495f7c45dcaa2e032e5927e513117"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.977146 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"]
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978564 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="extract-content"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978583 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="extract-content"
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978602 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978612 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978626 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="extract-utilities"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978635 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="extract-utilities"
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978648 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="extract-content"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978656 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="extract-content"
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978672 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978679 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: E1011 07:27:50.978693 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="extract-utilities"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.978701 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="extract-utilities"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.980502 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8859c7aa-75b4-404d-9427-55c33383cf64" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.980626 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7720ba37-80aa-4e0e-bc26-d663831cd2ca" containerName="registry-server"
Oct 11 07:27:50 crc kubenswrapper[5055]: I1011 07:27:50.982834 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-278m8"
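The 07:27:02 block above is the liveness-restart path end to end: a failed probe marks the container unhealthy, the kubelet kills it with the pod's termination grace period (gracePeriod=600 here), the PLEG reports ContainerDied with exitCode=0, and a replacement container starts. A hedged sketch of grace-period termination follows (SIGTERM, wait, then hard kill); the real kubelet delegates this to the CRI runtime, cri-o in this log, so the direct process handling below is a simplification.

```go
package main

import (
	"os"
	"os/exec"
	"syscall"
	"time"
)

// killWithGracePeriod sends SIGTERM, waits up to the grace period for a
// clean exit (the exitCode=0 seen in the log), then falls back to SIGKILL.
func killWithGracePeriod(proc *os.Process, grace time.Duration) error {
	if err := proc.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { _, err := proc.Wait(); done <- err }()
	select {
	case err := <-done:
		return err // exited inside the grace period
	case <-time.After(grace):
		return proc.Kill() // hard stop once the grace period lapses
	}
}

func main() {
	cmd := exec.Command("sleep", "600")
	_ = cmd.Start()
	_ = killWithGracePeriod(cmd.Process, 2*time.Second)
}
```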
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.010826 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"]
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.107384 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjvh8\" (UniqueName: \"kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.107450 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.107525 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.209596 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.209788 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjvh8\" (UniqueName: \"kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.209855 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.210175 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.210601 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.241989 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjvh8\" (UniqueName: \"kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8\") pod \"redhat-marketplace-278m8\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.317417 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.710223 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"]
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.921644 5055 generic.go:334] "Generic (PLEG): container finished" podID="798589a4-fc38-491d-a458-f755fd34ee86" containerID="215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58" exitCode=0
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.921749 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerDied","Data":"215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58"}
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.921960 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerStarted","Data":"dd3058e7597a21377e9c4dc4b15a1cf883dc223e8c5b533e95016ec645017f5d"}
Oct 11 07:27:51 crc kubenswrapper[5055]: I1011 07:27:51.924233 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 11 07:27:52 crc kubenswrapper[5055]: I1011 07:27:52.934685 5055 generic.go:334] "Generic (PLEG): container finished" podID="798589a4-fc38-491d-a458-f755fd34ee86" containerID="4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f" exitCode=0
Oct 11 07:27:52 crc kubenswrapper[5055]: I1011 07:27:52.934783 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerDied","Data":"4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f"}
Oct 11 07:27:53 crc kubenswrapper[5055]: I1011 07:27:53.944388 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerStarted","Data":"31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069"}
Oct 11 07:27:53 crc kubenswrapper[5055]: I1011 07:27:53.964441 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-278m8" podStartSLOduration=2.480513856 podStartE2EDuration="3.964420023s" podCreationTimestamp="2025-10-11 07:27:50 +0000 UTC" firstStartedPulling="2025-10-11 07:27:51.924028786 +0000 UTC m=+2055.698302593" lastFinishedPulling="2025-10-11 07:27:53.407934953 +0000 UTC m=+2057.182208760" observedRunningTime="2025-10-11 07:27:53.962174959 +0000 UTC m=+2057.736448776" watchObservedRunningTime="2025-10-11 07:27:53.964420023 +0000 UTC m=+2057.738693840"
Oct 11 07:28:01 crc kubenswrapper[5055]: I1011 07:28:01.318082 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-278m8"
Oct 11 07:28:01 crc kubenswrapper[5055]: I1011 07:28:01.318681 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-278m8"
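The pod_startup_latency_tracker entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same interval minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A short Go check reproduces both reported values from the logged timestamps; this is a sketch of the arithmetic, not the tracker's implementation.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		// Layout matching Go's Time.String() output used in the log.
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-10-11 07:27:50 +0000 UTC")
	firstPull := parse("2025-10-11 07:27:51.924028786 +0000 UTC")
	lastPull := parse("2025-10-11 07:27:53.407934953 +0000 UTC")
	running := parse("2025-10-11 07:27:53.964420023 +0000 UTC")

	e2e := running.Sub(created)          // total wall-clock startup
	slo := e2e - lastPull.Sub(firstPull) // startup excluding image pull
	fmt.Println(e2e, slo)                // 3.964420023s 2.480513856s
}
```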
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-278m8" Oct 11 07:28:01 crc kubenswrapper[5055]: I1011 07:28:01.354600 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-278m8" Oct 11 07:28:02 crc kubenswrapper[5055]: I1011 07:28:02.043263 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-278m8" Oct 11 07:28:02 crc kubenswrapper[5055]: I1011 07:28:02.089623 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"] Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.020383 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-278m8" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="registry-server" containerID="cri-o://31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069" gracePeriod=2 Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.415436 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-278m8" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.600316 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjvh8\" (UniqueName: \"kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8\") pod \"798589a4-fc38-491d-a458-f755fd34ee86\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.600370 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities\") pod \"798589a4-fc38-491d-a458-f755fd34ee86\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.600433 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content\") pod \"798589a4-fc38-491d-a458-f755fd34ee86\" (UID: \"798589a4-fc38-491d-a458-f755fd34ee86\") " Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.601466 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities" (OuterVolumeSpecName: "utilities") pod "798589a4-fc38-491d-a458-f755fd34ee86" (UID: "798589a4-fc38-491d-a458-f755fd34ee86"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.613000 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8" (OuterVolumeSpecName: "kube-api-access-sjvh8") pod "798589a4-fc38-491d-a458-f755fd34ee86" (UID: "798589a4-fc38-491d-a458-f755fd34ee86"). InnerVolumeSpecName "kube-api-access-sjvh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.614103 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "798589a4-fc38-491d-a458-f755fd34ee86" (UID: "798589a4-fc38-491d-a458-f755fd34ee86"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.702192 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.702227 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjvh8\" (UniqueName: \"kubernetes.io/projected/798589a4-fc38-491d-a458-f755fd34ee86-kube-api-access-sjvh8\") on node \"crc\" DevicePath \"\"" Oct 11 07:28:04 crc kubenswrapper[5055]: I1011 07:28:04.702239 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/798589a4-fc38-491d-a458-f755fd34ee86-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.028509 5055 generic.go:334] "Generic (PLEG): container finished" podID="798589a4-fc38-491d-a458-f755fd34ee86" containerID="31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069" exitCode=0 Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.028566 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-278m8" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.028585 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerDied","Data":"31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069"} Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.028992 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-278m8" event={"ID":"798589a4-fc38-491d-a458-f755fd34ee86","Type":"ContainerDied","Data":"dd3058e7597a21377e9c4dc4b15a1cf883dc223e8c5b533e95016ec645017f5d"} Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.029012 5055 scope.go:117] "RemoveContainer" containerID="31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.045804 5055 scope.go:117] "RemoveContainer" containerID="4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.052832 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"] Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.057818 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-278m8"] Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.064777 5055 scope.go:117] "RemoveContainer" containerID="215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.091537 5055 scope.go:117] "RemoveContainer" containerID="31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069" Oct 11 07:28:05 crc kubenswrapper[5055]: E1011 07:28:05.092249 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069\": container with ID starting with 31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069 not found: ID does not exist" containerID="31160f7e30769aacb4b87b45f24ce764bdd2baf282630ac886bcdd0b12dc7069" Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.092296 5055 
Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.092455 5055 scope.go:117] "RemoveContainer" containerID="4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f"
Oct 11 07:28:05 crc kubenswrapper[5055]: E1011 07:28:05.092942 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f\": container with ID starting with 4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f not found: ID does not exist" containerID="4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f"
Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.092973 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f"} err="failed to get container status \"4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f\": rpc error: code = NotFound desc = could not find container \"4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f\": container with ID starting with 4bc7488c73a362b29fbcfc202a9e782e886c18fd2913361d0fcf0ed88f23066f not found: ID does not exist"
Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.092992 5055 scope.go:117] "RemoveContainer" containerID="215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58"
Oct 11 07:28:05 crc kubenswrapper[5055]: E1011 07:28:05.093339 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58\": container with ID starting with 215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58 not found: ID does not exist" containerID="215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58"
Oct 11 07:28:05 crc kubenswrapper[5055]: I1011 07:28:05.093396 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58"} err="failed to get container status \"215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58\": rpc error: code = NotFound desc = could not find container \"215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58\": container with ID starting with 215e4514b23fe01c6b5d10a3d06c3e44efad3fad919a60b7430d2d8198086c58 not found: ID does not exist"
Oct 11 07:28:07 crc kubenswrapper[5055]: I1011 07:28:07.004658 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="798589a4-fc38-491d-a458-f755fd34ee86" path="/var/lib/kubelet/pods/798589a4-fc38-491d-a458-f755fd34ee86/volumes"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.099208 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-snsnp"]
Oct 11 07:28:54 crc kubenswrapper[5055]: E1011 07:28:54.100798 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="extract-content"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.100824 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="extract-content"
Oct 11 07:28:54 crc kubenswrapper[5055]: E1011 07:28:54.100857 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="extract-utilities"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.100871 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="extract-utilities"
Oct 11 07:28:54 crc kubenswrapper[5055]: E1011 07:28:54.100903 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="registry-server"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.100915 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="registry-server"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.101210 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="798589a4-fc38-491d-a458-f755fd34ee86" containerName="registry-server"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.102841 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-snsnp"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.112412 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-snsnp"]
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.231387 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.231821 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5rv2\" (UniqueName: \"kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.231878 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.333265 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5rv2\" (UniqueName: \"kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp"
Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.333376 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp"
pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.333426 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.333913 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.334036 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.356668 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5rv2\" (UniqueName: \"kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2\") pod \"certified-operators-snsnp\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.422020 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:28:54 crc kubenswrapper[5055]: I1011 07:28:54.917594 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-snsnp"] Oct 11 07:28:55 crc kubenswrapper[5055]: I1011 07:28:55.376268 5055 generic.go:334] "Generic (PLEG): container finished" podID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerID="1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e" exitCode=0 Oct 11 07:28:55 crc kubenswrapper[5055]: I1011 07:28:55.376320 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerDied","Data":"1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e"} Oct 11 07:28:55 crc kubenswrapper[5055]: I1011 07:28:55.376370 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerStarted","Data":"df34745d8e986852458a4f49b40a7bd6c8b401be8bde711c933a6fed13b79b08"} Oct 11 07:28:56 crc kubenswrapper[5055]: I1011 07:28:56.385567 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerStarted","Data":"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"} Oct 11 07:28:57 crc kubenswrapper[5055]: I1011 07:28:57.396454 5055 generic.go:334] "Generic (PLEG): container finished" podID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerID="2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3" exitCode=0 Oct 11 07:28:57 crc kubenswrapper[5055]: I1011 07:28:57.396508 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerDied","Data":"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"} Oct 11 07:28:58 crc kubenswrapper[5055]: I1011 07:28:58.405302 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerStarted","Data":"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"} Oct 11 07:28:58 crc kubenswrapper[5055]: I1011 07:28:58.423977 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-snsnp" podStartSLOduration=2.02980519 podStartE2EDuration="4.423952906s" podCreationTimestamp="2025-10-11 07:28:54 +0000 UTC" firstStartedPulling="2025-10-11 07:28:55.377511234 +0000 UTC m=+2119.151785041" lastFinishedPulling="2025-10-11 07:28:57.77165895 +0000 UTC m=+2121.545932757" observedRunningTime="2025-10-11 07:28:58.421021173 +0000 UTC m=+2122.195295010" watchObservedRunningTime="2025-10-11 07:28:58.423952906 +0000 UTC m=+2122.198226713" Oct 11 07:29:02 crc kubenswrapper[5055]: I1011 07:29:02.423013 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:29:02 crc kubenswrapper[5055]: I1011 07:29:02.423603 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
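Both catalog pods show the same probe ordering: the startup probe reports unhealthy and then started, and only afterwards does the readiness probe flip to ready (the empty status="" is a readiness result recorded before startup has passed, which the kubelet does not act on). A simplified model of that gating follows; this is a sketch of the documented probe semantics, not the kubelet's prober worker.

```go
package main

import "fmt"

type podProbes struct {
	startupDone bool
	ready       bool
}

// observe feeds one probe result into the state machine.
func (p *podProbes) observe(probe string, success bool) {
	switch probe {
	case "startup":
		if success {
			p.startupDone = true // probe="startup" status="started"
		}
	case "readiness":
		if p.startupDone {
			p.ready = success // only meaningful once startup has passed
		}
	}
}

func main() {
	var p podProbes
	p.observe("readiness", true) // ignored: startup probe not finished yet
	fmt.Println(p.ready)         // false
	p.observe("startup", true)
	p.observe("readiness", true)
	fmt.Println(p.ready) // true
}
```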
podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:29:04 crc kubenswrapper[5055]: I1011 07:29:04.423565 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:29:04 crc kubenswrapper[5055]: I1011 07:29:04.424059 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:29:04 crc kubenswrapper[5055]: I1011 07:29:04.472394 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:29:04 crc kubenswrapper[5055]: I1011 07:29:04.530433 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:29:04 crc kubenswrapper[5055]: I1011 07:29:04.719527 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-snsnp"] Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.467879 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-snsnp" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="registry-server" containerID="cri-o://5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a" gracePeriod=2 Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.869949 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-snsnp" Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.933634 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content\") pod \"42ef1b1a-ca25-450e-a13a-334407e5a608\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.933802 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5rv2\" (UniqueName: \"kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2\") pod \"42ef1b1a-ca25-450e-a13a-334407e5a608\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.933862 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities\") pod \"42ef1b1a-ca25-450e-a13a-334407e5a608\" (UID: \"42ef1b1a-ca25-450e-a13a-334407e5a608\") " Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.935242 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities" (OuterVolumeSpecName: "utilities") pod "42ef1b1a-ca25-450e-a13a-334407e5a608" (UID: "42ef1b1a-ca25-450e-a13a-334407e5a608"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.939233 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2" (OuterVolumeSpecName: "kube-api-access-d5rv2") pod "42ef1b1a-ca25-450e-a13a-334407e5a608" (UID: "42ef1b1a-ca25-450e-a13a-334407e5a608"). InnerVolumeSpecName "kube-api-access-d5rv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:29:06 crc kubenswrapper[5055]: I1011 07:29:06.981422 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "42ef1b1a-ca25-450e-a13a-334407e5a608" (UID: "42ef1b1a-ca25-450e-a13a-334407e5a608"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.036017 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5rv2\" (UniqueName: \"kubernetes.io/projected/42ef1b1a-ca25-450e-a13a-334407e5a608-kube-api-access-d5rv2\") on node \"crc\" DevicePath \"\"" Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.036069 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.036082 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/42ef1b1a-ca25-450e-a13a-334407e5a608-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.479076 5055 generic.go:334] "Generic (PLEG): container finished" podID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerID="5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a" exitCode=0 Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.479123 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerDied","Data":"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"} Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.479168 5055 util.go:48] "No ready sandbox for pod can be found. 
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.479192 5055 scope.go:117] "RemoveContainer" containerID="5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.479179 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-snsnp" event={"ID":"42ef1b1a-ca25-450e-a13a-334407e5a608","Type":"ContainerDied","Data":"df34745d8e986852458a4f49b40a7bd6c8b401be8bde711c933a6fed13b79b08"}
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.502208 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-snsnp"]
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.506713 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-snsnp"]
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.513136 5055 scope.go:117] "RemoveContainer" containerID="2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.537606 5055 scope.go:117] "RemoveContainer" containerID="1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.556013 5055 scope.go:117] "RemoveContainer" containerID="5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"
Oct 11 07:29:07 crc kubenswrapper[5055]: E1011 07:29:07.557160 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a\": container with ID starting with 5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a not found: ID does not exist" containerID="5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.557201 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a"} err="failed to get container status \"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a\": rpc error: code = NotFound desc = could not find container \"5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a\": container with ID starting with 5f6837acb0273c46b080494eefd03f34d7c02416d6d59fc381edc813724a2b9a not found: ID does not exist"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.557230 5055 scope.go:117] "RemoveContainer" containerID="2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"
Oct 11 07:29:07 crc kubenswrapper[5055]: E1011 07:29:07.557564 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3\": container with ID starting with 2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3 not found: ID does not exist" containerID="2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.557602 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3"} err="failed to get container status \"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3\": rpc error: code = NotFound desc = could not find container \"2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3\": container with ID starting with 2512245d086afa131d618da701b41173c4ed39c7967c199ecbf22ca568f8c8a3 not found: ID does not exist"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.557630 5055 scope.go:117] "RemoveContainer" containerID="1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e"
Oct 11 07:29:07 crc kubenswrapper[5055]: E1011 07:29:07.558133 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e\": container with ID starting with 1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e not found: ID does not exist" containerID="1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e"
Oct 11 07:29:07 crc kubenswrapper[5055]: I1011 07:29:07.558158 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e"} err="failed to get container status \"1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e\": rpc error: code = NotFound desc = could not find container \"1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e\": container with ID starting with 1c8d56ed74cf58c1e889203e5bec72f7620d7c6e64cbd3607d861b9a9328782e not found: ID does not exist"
Oct 11 07:29:09 crc kubenswrapper[5055]: I1011 07:29:09.005167 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" path="/var/lib/kubelet/pods/42ef1b1a-ca25-450e-a13a-334407e5a608/volumes"
Oct 11 07:29:32 crc kubenswrapper[5055]: I1011 07:29:32.421871 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 07:29:32 crc kubenswrapper[5055]: I1011 07:29:32.422364 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.147288 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc"]
Oct 11 07:30:00 crc kubenswrapper[5055]: E1011 07:30:00.153347 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="extract-content"
Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.153897 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="extract-content"
Oct 11 07:30:00 crc kubenswrapper[5055]: E1011 07:30:00.153935 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="registry-server"
Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.153946 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="registry-server"
Oct 11 07:30:00 crc kubenswrapper[5055]: E1011 07:30:00.153967 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="extract-utilities"
podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="extract-utilities" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.153978 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="extract-utilities" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.154227 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="42ef1b1a-ca25-450e-a13a-334407e5a608" containerName="registry-server" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.155097 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.158345 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.159147 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc"] Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.160874 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.340975 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.341035 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.341083 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwh2w\" (UniqueName: \"kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.442254 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.442338 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwh2w\" (UniqueName: \"kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.442401 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.443095 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.448436 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.459310 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwh2w\" (UniqueName: \"kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w\") pod \"collect-profiles-29336130-lljdc\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.479474 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:00 crc kubenswrapper[5055]: I1011 07:30:00.891356 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc"] Oct 11 07:30:01 crc kubenswrapper[5055]: I1011 07:30:01.876458 5055 generic.go:334] "Generic (PLEG): container finished" podID="b1e30299-f1f2-44db-9782-40de14373f55" containerID="2ff16425d66b3317f75c747be7e31c9e91624ab6f9ed3f8c25560d428beecd12" exitCode=0 Oct 11 07:30:01 crc kubenswrapper[5055]: I1011 07:30:01.876506 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" event={"ID":"b1e30299-f1f2-44db-9782-40de14373f55","Type":"ContainerDied","Data":"2ff16425d66b3317f75c747be7e31c9e91624ab6f9ed3f8c25560d428beecd12"} Oct 11 07:30:01 crc kubenswrapper[5055]: I1011 07:30:01.876815 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" event={"ID":"b1e30299-f1f2-44db-9782-40de14373f55","Type":"ContainerStarted","Data":"7fb813682661c5ceebf3183f631e31df5c2c384e05fcdf58e36d0bafc8add2bf"} Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.421930 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.421986 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" 
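collect-profiles is a CronJob, and the numeric suffix of the child Job encodes the scheduled time in minutes since the Unix epoch, matching how the CronJob controller derives child Job names: 29336130 minutes is 2025-10-11 07:30:00 UTC, exactly the SyncLoop ADD time above (and 45 minutes after the collect-profiles-29336085 job deleted further down). A two-line check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const suffix = 29336130 // from collect-profiles-29336130-lljdc
	t := time.Unix(suffix*60, 0).UTC()
	fmt.Println(t) // 2025-10-11 07:30:00 +0000 UTC
}
```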
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.422035 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.422560 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.422622 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" gracePeriod=600 Oct 11 07:30:02 crc kubenswrapper[5055]: E1011 07:30:02.541389 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.884373 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" exitCode=0 Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.884436 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590"} Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.884493 5055 scope.go:117] "RemoveContainer" containerID="90a506afbdc2e5593d1f36cdebcba81c98af7c39813d3b6c82be0fc50d31ec68" Oct 11 07:30:02 crc kubenswrapper[5055]: I1011 07:30:02.885095 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:30:02 crc kubenswrapper[5055]: E1011 07:30:02.885347 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.142796 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.279812 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume\") pod \"b1e30299-f1f2-44db-9782-40de14373f55\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.279874 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume\") pod \"b1e30299-f1f2-44db-9782-40de14373f55\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.279925 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwh2w\" (UniqueName: \"kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w\") pod \"b1e30299-f1f2-44db-9782-40de14373f55\" (UID: \"b1e30299-f1f2-44db-9782-40de14373f55\") " Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.280839 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume" (OuterVolumeSpecName: "config-volume") pod "b1e30299-f1f2-44db-9782-40de14373f55" (UID: "b1e30299-f1f2-44db-9782-40de14373f55"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.285332 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b1e30299-f1f2-44db-9782-40de14373f55" (UID: "b1e30299-f1f2-44db-9782-40de14373f55"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.285731 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w" (OuterVolumeSpecName: "kube-api-access-xwh2w") pod "b1e30299-f1f2-44db-9782-40de14373f55" (UID: "b1e30299-f1f2-44db-9782-40de14373f55"). InnerVolumeSpecName "kube-api-access-xwh2w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.381888 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwh2w\" (UniqueName: \"kubernetes.io/projected/b1e30299-f1f2-44db-9782-40de14373f55-kube-api-access-xwh2w\") on node \"crc\" DevicePath \"\"" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.381935 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1e30299-f1f2-44db-9782-40de14373f55-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.381949 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1e30299-f1f2-44db-9782-40de14373f55-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.893724 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" event={"ID":"b1e30299-f1f2-44db-9782-40de14373f55","Type":"ContainerDied","Data":"7fb813682661c5ceebf3183f631e31df5c2c384e05fcdf58e36d0bafc8add2bf"} Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.893757 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc" Oct 11 07:30:03 crc kubenswrapper[5055]: I1011 07:30:03.893789 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7fb813682661c5ceebf3183f631e31df5c2c384e05fcdf58e36d0bafc8add2bf" Oct 11 07:30:04 crc kubenswrapper[5055]: I1011 07:30:04.218449 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"] Oct 11 07:30:04 crc kubenswrapper[5055]: I1011 07:30:04.222906 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336085-mbdqg"] Oct 11 07:30:05 crc kubenswrapper[5055]: I1011 07:30:05.004448 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f102e3-a97b-4401-a760-98f34d6fe038" path="/var/lib/kubelet/pods/25f102e3-a97b-4401-a760-98f34d6fe038/volumes" Oct 11 07:30:13 crc kubenswrapper[5055]: I1011 07:30:13.994170 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:30:13 crc kubenswrapper[5055]: E1011 07:30:13.994991 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:30:28 crc kubenswrapper[5055]: I1011 07:30:28.994986 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:30:28 crc kubenswrapper[5055]: E1011 07:30:28.995947 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:30:41 crc kubenswrapper[5055]: I1011 07:30:41.993706 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:30:41 crc kubenswrapper[5055]: E1011 07:30:41.995323 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:30:53 crc kubenswrapper[5055]: I1011 07:30:53.159805 5055 scope.go:117] "RemoveContainer" containerID="c8abc14887fe6b1e823f609b31de625ff1fe1c99704f169a579d6b80fa424e8a" Oct 11 07:30:53 crc kubenswrapper[5055]: I1011 07:30:53.993377 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:30:53 crc kubenswrapper[5055]: E1011 07:30:53.994020 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:31:07 crc kubenswrapper[5055]: I1011 07:31:07.993626 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:31:07 crc kubenswrapper[5055]: E1011 07:31:07.994386 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:31:20 crc kubenswrapper[5055]: I1011 07:31:20.993983 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:31:20 crc kubenswrapper[5055]: E1011 07:31:20.995443 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:31:31 crc kubenswrapper[5055]: I1011 07:31:31.993982 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:31:31 crc kubenswrapper[5055]: E1011 07:31:31.995044 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:31:44 crc kubenswrapper[5055]: I1011 07:31:44.993469 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:31:44 crc kubenswrapper[5055]: E1011 07:31:44.994223 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:31:56 crc kubenswrapper[5055]: I1011 07:31:56.997871 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:31:57 crc kubenswrapper[5055]: E1011 07:31:56.998653 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:32:08 crc kubenswrapper[5055]: I1011 07:32:08.993312 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:32:08 crc kubenswrapper[5055]: E1011 07:32:08.994130 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.505474 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:18 crc kubenswrapper[5055]: E1011 07:32:18.506338 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1e30299-f1f2-44db-9782-40de14373f55" containerName="collect-profiles" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.506356 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1e30299-f1f2-44db-9782-40de14373f55" containerName="collect-profiles" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.506512 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1e30299-f1f2-44db-9782-40de14373f55" containerName="collect-profiles" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.507628 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.517856 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.531811 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfzdm\" (UniqueName: \"kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.531866 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.531939 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.632691 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfzdm\" (UniqueName: \"kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.633095 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.633191 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.633661 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.633803 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.653109 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mfzdm\" (UniqueName: \"kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm\") pod \"redhat-operators-2pjbh\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:18 crc kubenswrapper[5055]: I1011 07:32:18.830864 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:19 crc kubenswrapper[5055]: I1011 07:32:19.273109 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:19 crc kubenswrapper[5055]: I1011 07:32:19.973270 5055 generic.go:334] "Generic (PLEG): container finished" podID="3308dd2b-9640-49dd-add9-ed69e1710682" containerID="c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10" exitCode=0 Oct 11 07:32:19 crc kubenswrapper[5055]: I1011 07:32:19.973325 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerDied","Data":"c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10"} Oct 11 07:32:19 crc kubenswrapper[5055]: I1011 07:32:19.973351 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerStarted","Data":"c57ffb91a959ac3f05931c2de11f50e0d34e6bb62f1a4f8706b418eb20a67db3"} Oct 11 07:32:19 crc kubenswrapper[5055]: I1011 07:32:19.995984 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:32:19 crc kubenswrapper[5055]: E1011 07:32:19.996287 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:32:21 crc kubenswrapper[5055]: I1011 07:32:21.989155 5055 generic.go:334] "Generic (PLEG): container finished" podID="3308dd2b-9640-49dd-add9-ed69e1710682" containerID="b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef" exitCode=0 Oct 11 07:32:21 crc kubenswrapper[5055]: I1011 07:32:21.989196 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerDied","Data":"b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef"} Oct 11 07:32:23 crc kubenswrapper[5055]: I1011 07:32:23.001368 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerStarted","Data":"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188"} Oct 11 07:32:23 crc kubenswrapper[5055]: I1011 07:32:23.023976 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2pjbh" podStartSLOduration=2.51597867 podStartE2EDuration="5.02395816s" podCreationTimestamp="2025-10-11 07:32:18 +0000 UTC" firstStartedPulling="2025-10-11 07:32:19.975579314 +0000 UTC m=+2323.749853121" lastFinishedPulling="2025-10-11 07:32:22.483558804 +0000 UTC m=+2326.257832611" 
observedRunningTime="2025-10-11 07:32:23.020275475 +0000 UTC m=+2326.794549282" watchObservedRunningTime="2025-10-11 07:32:23.02395816 +0000 UTC m=+2326.798231967" Oct 11 07:32:28 crc kubenswrapper[5055]: I1011 07:32:28.831577 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:28 crc kubenswrapper[5055]: I1011 07:32:28.832125 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:28 crc kubenswrapper[5055]: I1011 07:32:28.871036 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:29 crc kubenswrapper[5055]: I1011 07:32:29.075636 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:29 crc kubenswrapper[5055]: I1011 07:32:29.115046 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.052021 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2pjbh" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="registry-server" containerID="cri-o://cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188" gracePeriod=2 Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.458837 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.533000 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content\") pod \"3308dd2b-9640-49dd-add9-ed69e1710682\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.533104 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfzdm\" (UniqueName: \"kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm\") pod \"3308dd2b-9640-49dd-add9-ed69e1710682\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.533126 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities\") pod \"3308dd2b-9640-49dd-add9-ed69e1710682\" (UID: \"3308dd2b-9640-49dd-add9-ed69e1710682\") " Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.533960 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities" (OuterVolumeSpecName: "utilities") pod "3308dd2b-9640-49dd-add9-ed69e1710682" (UID: "3308dd2b-9640-49dd-add9-ed69e1710682"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.538928 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm" (OuterVolumeSpecName: "kube-api-access-mfzdm") pod "3308dd2b-9640-49dd-add9-ed69e1710682" (UID: "3308dd2b-9640-49dd-add9-ed69e1710682"). InnerVolumeSpecName "kube-api-access-mfzdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.627345 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3308dd2b-9640-49dd-add9-ed69e1710682" (UID: "3308dd2b-9640-49dd-add9-ed69e1710682"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.634727 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.634884 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfzdm\" (UniqueName: \"kubernetes.io/projected/3308dd2b-9640-49dd-add9-ed69e1710682-kube-api-access-mfzdm\") on node \"crc\" DevicePath \"\"" Oct 11 07:32:31 crc kubenswrapper[5055]: I1011 07:32:31.634899 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3308dd2b-9640-49dd-add9-ed69e1710682-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.062986 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2pjbh" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.062955 5055 generic.go:334] "Generic (PLEG): container finished" podID="3308dd2b-9640-49dd-add9-ed69e1710682" containerID="cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188" exitCode=0 Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.063034 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerDied","Data":"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188"} Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.063643 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2pjbh" event={"ID":"3308dd2b-9640-49dd-add9-ed69e1710682","Type":"ContainerDied","Data":"c57ffb91a959ac3f05931c2de11f50e0d34e6bb62f1a4f8706b418eb20a67db3"} Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.063691 5055 scope.go:117] "RemoveContainer" containerID="cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.085574 5055 scope.go:117] "RemoveContainer" containerID="b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.111695 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.112441 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2pjbh"] Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.121745 5055 scope.go:117] "RemoveContainer" containerID="c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.153134 5055 scope.go:117] "RemoveContainer" containerID="cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188" Oct 11 07:32:32 crc kubenswrapper[5055]: E1011 07:32:32.153689 5055 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188\": container with ID starting with cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188 not found: ID does not exist" containerID="cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.153737 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188"} err="failed to get container status \"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188\": rpc error: code = NotFound desc = could not find container \"cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188\": container with ID starting with cda44e2c5015b83f96e88e31ce8c7d5a57e6bf4351f7822c7a459ceeee497188 not found: ID does not exist" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.153858 5055 scope.go:117] "RemoveContainer" containerID="b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef" Oct 11 07:32:32 crc kubenswrapper[5055]: E1011 07:32:32.154229 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef\": container with ID starting with b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef not found: ID does not exist" containerID="b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.154259 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef"} err="failed to get container status \"b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef\": rpc error: code = NotFound desc = could not find container \"b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef\": container with ID starting with b153dc7b0db229127fd5e003b4337eb63f2986cf1fb1febd9dae3b95310ad1ef not found: ID does not exist" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.154273 5055 scope.go:117] "RemoveContainer" containerID="c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10" Oct 11 07:32:32 crc kubenswrapper[5055]: E1011 07:32:32.154524 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10\": container with ID starting with c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10 not found: ID does not exist" containerID="c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.154547 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10"} err="failed to get container status \"c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10\": rpc error: code = NotFound desc = could not find container \"c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10\": container with ID starting with c1d7993515f950650aedc10b9703b819134cd2177926440207504d2d09082f10 not found: ID does not exist" Oct 11 07:32:32 crc kubenswrapper[5055]: I1011 07:32:32.994155 5055 scope.go:117] "RemoveContainer" 
containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:32:32 crc kubenswrapper[5055]: E1011 07:32:32.994383 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:32:33 crc kubenswrapper[5055]: I1011 07:32:33.010829 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" path="/var/lib/kubelet/pods/3308dd2b-9640-49dd-add9-ed69e1710682/volumes" Oct 11 07:32:45 crc kubenswrapper[5055]: I1011 07:32:45.993706 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:32:45 crc kubenswrapper[5055]: E1011 07:32:45.994748 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:33:00 crc kubenswrapper[5055]: I1011 07:33:00.993905 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:33:00 crc kubenswrapper[5055]: E1011 07:33:00.994459 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:33:13 crc kubenswrapper[5055]: I1011 07:33:13.994061 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:33:13 crc kubenswrapper[5055]: E1011 07:33:13.994742 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.749847 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6l6qh"] Oct 11 07:33:16 crc kubenswrapper[5055]: E1011 07:33:16.752008 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="extract-utilities" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.752028 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="extract-utilities" Oct 11 07:33:16 crc kubenswrapper[5055]: E1011 07:33:16.752054 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="registry-server" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.752061 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="registry-server" Oct 11 07:33:16 crc kubenswrapper[5055]: E1011 07:33:16.752078 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="extract-content" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.752085 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="extract-content" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.752792 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="3308dd2b-9640-49dd-add9-ed69e1710682" containerName="registry-server" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.754556 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.762997 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6l6qh"] Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.856932 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-utilities\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.856995 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzqrm\" (UniqueName: \"kubernetes.io/projected/b4aa6bf6-796d-44cf-8c57-c2ef36885910-kube-api-access-rzqrm\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.857161 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-catalog-content\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.958151 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-catalog-content\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.958308 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-utilities\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.958366 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzqrm\" (UniqueName: \"kubernetes.io/projected/b4aa6bf6-796d-44cf-8c57-c2ef36885910-kube-api-access-rzqrm\") pod 
\"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.958823 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-catalog-content\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.959122 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4aa6bf6-796d-44cf-8c57-c2ef36885910-utilities\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:16 crc kubenswrapper[5055]: I1011 07:33:16.983815 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzqrm\" (UniqueName: \"kubernetes.io/projected/b4aa6bf6-796d-44cf-8c57-c2ef36885910-kube-api-access-rzqrm\") pod \"community-operators-6l6qh\" (UID: \"b4aa6bf6-796d-44cf-8c57-c2ef36885910\") " pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:17 crc kubenswrapper[5055]: I1011 07:33:17.098079 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:17 crc kubenswrapper[5055]: I1011 07:33:17.614677 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6l6qh"] Oct 11 07:33:18 crc kubenswrapper[5055]: I1011 07:33:18.428599 5055 generic.go:334] "Generic (PLEG): container finished" podID="b4aa6bf6-796d-44cf-8c57-c2ef36885910" containerID="e70cb033b04019ae001f8f31fdb648ba7852d6c8a6b4163a7b5fd4f9431fcf89" exitCode=0 Oct 11 07:33:18 crc kubenswrapper[5055]: I1011 07:33:18.428798 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6l6qh" event={"ID":"b4aa6bf6-796d-44cf-8c57-c2ef36885910","Type":"ContainerDied","Data":"e70cb033b04019ae001f8f31fdb648ba7852d6c8a6b4163a7b5fd4f9431fcf89"} Oct 11 07:33:18 crc kubenswrapper[5055]: I1011 07:33:18.429241 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6l6qh" event={"ID":"b4aa6bf6-796d-44cf-8c57-c2ef36885910","Type":"ContainerStarted","Data":"c25b8744be5f211f6fc0adca833dbe36f9d63254a28cde95df031320641dc8e1"} Oct 11 07:33:18 crc kubenswrapper[5055]: I1011 07:33:18.432530 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:33:21 crc kubenswrapper[5055]: I1011 07:33:21.453450 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6l6qh" event={"ID":"b4aa6bf6-796d-44cf-8c57-c2ef36885910","Type":"ContainerStarted","Data":"faf083be7e93df33f638b9449b080a8ed4c913baa8fbe72725252c56e64619b5"} Oct 11 07:33:22 crc kubenswrapper[5055]: I1011 07:33:22.464952 5055 generic.go:334] "Generic (PLEG): container finished" podID="b4aa6bf6-796d-44cf-8c57-c2ef36885910" containerID="faf083be7e93df33f638b9449b080a8ed4c913baa8fbe72725252c56e64619b5" exitCode=0 Oct 11 07:33:22 crc kubenswrapper[5055]: I1011 07:33:22.465239 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6l6qh" 
event={"ID":"b4aa6bf6-796d-44cf-8c57-c2ef36885910","Type":"ContainerDied","Data":"faf083be7e93df33f638b9449b080a8ed4c913baa8fbe72725252c56e64619b5"} Oct 11 07:33:23 crc kubenswrapper[5055]: I1011 07:33:23.477516 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6l6qh" event={"ID":"b4aa6bf6-796d-44cf-8c57-c2ef36885910","Type":"ContainerStarted","Data":"3f1c4e0596e59e75d69ab72919ea382f6c310103e216247285faa0d31f0a01ce"} Oct 11 07:33:23 crc kubenswrapper[5055]: I1011 07:33:23.505949 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6l6qh" podStartSLOduration=3.049656406 podStartE2EDuration="7.505928811s" podCreationTimestamp="2025-10-11 07:33:16 +0000 UTC" firstStartedPulling="2025-10-11 07:33:18.432208482 +0000 UTC m=+2382.206482299" lastFinishedPulling="2025-10-11 07:33:22.888480897 +0000 UTC m=+2386.662754704" observedRunningTime="2025-10-11 07:33:23.503585755 +0000 UTC m=+2387.277859562" watchObservedRunningTime="2025-10-11 07:33:23.505928811 +0000 UTC m=+2387.280202628" Oct 11 07:33:25 crc kubenswrapper[5055]: I1011 07:33:25.993418 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:33:25 crc kubenswrapper[5055]: E1011 07:33:25.994062 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:33:27 crc kubenswrapper[5055]: I1011 07:33:27.098802 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:27 crc kubenswrapper[5055]: I1011 07:33:27.099152 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:27 crc kubenswrapper[5055]: I1011 07:33:27.142418 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.150227 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6l6qh" Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.218768 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6l6qh"] Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.258328 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.258579 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vlcm5" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="registry-server" containerID="cri-o://d27e3d917919fc6c68033cbbda44871a16afae86753fd3791f85e9bd93b3692e" gracePeriod=2 Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.610129 5055 generic.go:334] "Generic (PLEG): container finished" podID="24acda22-b4e6-4e24-b340-4595ce200507" containerID="d27e3d917919fc6c68033cbbda44871a16afae86753fd3791f85e9bd93b3692e" exitCode=0 Oct 11 07:33:37 crc kubenswrapper[5055]: 
I1011 07:33:37.610219 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerDied","Data":"d27e3d917919fc6c68033cbbda44871a16afae86753fd3791f85e9bd93b3692e"} Oct 11 07:33:37 crc kubenswrapper[5055]: I1011 07:33:37.831291 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.002188 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities\") pod \"24acda22-b4e6-4e24-b340-4595ce200507\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.002491 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content\") pod \"24acda22-b4e6-4e24-b340-4595ce200507\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.002673 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tlmv\" (UniqueName: \"kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv\") pod \"24acda22-b4e6-4e24-b340-4595ce200507\" (UID: \"24acda22-b4e6-4e24-b340-4595ce200507\") " Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.002676 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities" (OuterVolumeSpecName: "utilities") pod "24acda22-b4e6-4e24-b340-4595ce200507" (UID: "24acda22-b4e6-4e24-b340-4595ce200507"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.003092 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.008163 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv" (OuterVolumeSpecName: "kube-api-access-2tlmv") pod "24acda22-b4e6-4e24-b340-4595ce200507" (UID: "24acda22-b4e6-4e24-b340-4595ce200507"). InnerVolumeSpecName "kube-api-access-2tlmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.048515 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24acda22-b4e6-4e24-b340-4595ce200507" (UID: "24acda22-b4e6-4e24-b340-4595ce200507"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.104278 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tlmv\" (UniqueName: \"kubernetes.io/projected/24acda22-b4e6-4e24-b340-4595ce200507-kube-api-access-2tlmv\") on node \"crc\" DevicePath \"\"" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.104305 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24acda22-b4e6-4e24-b340-4595ce200507-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.619457 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vlcm5" event={"ID":"24acda22-b4e6-4e24-b340-4595ce200507","Type":"ContainerDied","Data":"16fe314f6f622d08f860191a72db515e53b71e314766c345121346f1d1d1a3a3"} Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.619515 5055 scope.go:117] "RemoveContainer" containerID="d27e3d917919fc6c68033cbbda44871a16afae86753fd3791f85e9bd93b3692e" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.619532 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vlcm5" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.645261 5055 scope.go:117] "RemoveContainer" containerID="8b5e9c44a02be397843d499c531960ee93b242aaae9459a5df15b432fab14acd" Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.656194 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.661007 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vlcm5"] Oct 11 07:33:38 crc kubenswrapper[5055]: I1011 07:33:38.670661 5055 scope.go:117] "RemoveContainer" containerID="5b8e123f417beb8f1342f088f0a56b39bef8e1f6fcb37df32a8acaec180004fa" Oct 11 07:33:39 crc kubenswrapper[5055]: I1011 07:33:39.002048 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24acda22-b4e6-4e24-b340-4595ce200507" path="/var/lib/kubelet/pods/24acda22-b4e6-4e24-b340-4595ce200507/volumes" Oct 11 07:33:39 crc kubenswrapper[5055]: I1011 07:33:39.993356 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:33:39 crc kubenswrapper[5055]: E1011 07:33:39.993639 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:33:50 crc kubenswrapper[5055]: I1011 07:33:50.993937 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:33:50 crc kubenswrapper[5055]: E1011 07:33:50.994734 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:34:02 crc kubenswrapper[5055]: I1011 07:34:02.993529 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:34:02 crc kubenswrapper[5055]: E1011 07:34:02.994268 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:34:15 crc kubenswrapper[5055]: I1011 07:34:15.993531 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:34:15 crc kubenswrapper[5055]: E1011 07:34:15.994253 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:34:27 crc kubenswrapper[5055]: I1011 07:34:27.994260 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:34:27 crc kubenswrapper[5055]: E1011 07:34:27.995033 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:34:40 crc kubenswrapper[5055]: I1011 07:34:40.993843 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:34:40 crc kubenswrapper[5055]: E1011 07:34:40.996301 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:34:55 crc kubenswrapper[5055]: I1011 07:34:55.994134 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:34:55 crc kubenswrapper[5055]: E1011 07:34:55.996304 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:35:09 crc kubenswrapper[5055]: I1011 07:35:09.993405 5055 
scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:35:10 crc kubenswrapper[5055]: I1011 07:35:10.265591 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1"} Oct 11 07:37:32 crc kubenswrapper[5055]: I1011 07:37:32.422698 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:37:32 crc kubenswrapper[5055]: I1011 07:37:32.423214 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:38:02 crc kubenswrapper[5055]: I1011 07:38:02.421632 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:38:02 crc kubenswrapper[5055]: I1011 07:38:02.422088 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.750028 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:25 crc kubenswrapper[5055]: E1011 07:38:25.751080 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="extract-content" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.751129 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="extract-content" Oct 11 07:38:25 crc kubenswrapper[5055]: E1011 07:38:25.751148 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="extract-utilities" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.751159 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="extract-utilities" Oct 11 07:38:25 crc kubenswrapper[5055]: E1011 07:38:25.751181 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="registry-server" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.751192 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="registry-server" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.751457 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="24acda22-b4e6-4e24-b340-4595ce200507" containerName="registry-server" Oct 11 07:38:25 crc kubenswrapper[5055]: 
I1011 07:38:25.753223 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.758563 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.874378 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gj6t\" (UniqueName: \"kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.875032 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.875105 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.976027 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.976084 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.976140 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gj6t\" (UniqueName: \"kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.976509 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: I1011 07:38:25.976613 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:25 crc kubenswrapper[5055]: 
I1011 07:38:25.995328 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gj6t\" (UniqueName: \"kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t\") pod \"redhat-marketplace-6qk9g\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.112820 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.517205 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.738006 5055 generic.go:334] "Generic (PLEG): container finished" podID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerID="a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47" exitCode=0 Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.738093 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerDied","Data":"a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47"} Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.738388 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerStarted","Data":"12d7924a30e7041e1ef6e9a712d2a8c6d5270f2a4e74bd341365a4256ddaf8ef"} Oct 11 07:38:26 crc kubenswrapper[5055]: I1011 07:38:26.739465 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:38:27 crc kubenswrapper[5055]: I1011 07:38:27.749324 5055 generic.go:334] "Generic (PLEG): container finished" podID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerID="f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a" exitCode=0 Oct 11 07:38:27 crc kubenswrapper[5055]: I1011 07:38:27.749381 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerDied","Data":"f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a"} Oct 11 07:38:28 crc kubenswrapper[5055]: I1011 07:38:28.759087 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerStarted","Data":"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424"} Oct 11 07:38:28 crc kubenswrapper[5055]: I1011 07:38:28.780800 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6qk9g" podStartSLOduration=2.403615125 podStartE2EDuration="3.780759384s" podCreationTimestamp="2025-10-11 07:38:25 +0000 UTC" firstStartedPulling="2025-10-11 07:38:26.739247696 +0000 UTC m=+2690.513521503" lastFinishedPulling="2025-10-11 07:38:28.116391955 +0000 UTC m=+2691.890665762" observedRunningTime="2025-10-11 07:38:28.773639712 +0000 UTC m=+2692.547913529" watchObservedRunningTime="2025-10-11 07:38:28.780759384 +0000 UTC m=+2692.555033211" Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.422868 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.423256 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.423318 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.424076 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.424132 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1" gracePeriod=600 Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.800577 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1" exitCode=0 Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.800646 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1"} Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.800886 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71"} Oct 11 07:38:32 crc kubenswrapper[5055]: I1011 07:38:32.800909 5055 scope.go:117] "RemoveContainer" containerID="bb13473a68a6f1c05ef155a447e9a33f3a43ac74422d070d370e6bfccbe33590" Oct 11 07:38:36 crc kubenswrapper[5055]: I1011 07:38:36.114505 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:36 crc kubenswrapper[5055]: I1011 07:38:36.116884 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:36 crc kubenswrapper[5055]: I1011 07:38:36.163249 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:36 crc kubenswrapper[5055]: I1011 07:38:36.880884 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:36 crc kubenswrapper[5055]: I1011 07:38:36.928971 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:38 crc kubenswrapper[5055]: I1011 07:38:38.845307 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6qk9g" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="registry-server" containerID="cri-o://fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424" gracePeriod=2 Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.249925 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.387821 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gj6t\" (UniqueName: \"kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t\") pod \"2e95db5e-31b2-40c8-b4f9-47517eb09890\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.388847 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content\") pod \"2e95db5e-31b2-40c8-b4f9-47517eb09890\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.388964 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities\") pod \"2e95db5e-31b2-40c8-b4f9-47517eb09890\" (UID: \"2e95db5e-31b2-40c8-b4f9-47517eb09890\") " Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.389698 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities" (OuterVolumeSpecName: "utilities") pod "2e95db5e-31b2-40c8-b4f9-47517eb09890" (UID: "2e95db5e-31b2-40c8-b4f9-47517eb09890"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.392842 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t" (OuterVolumeSpecName: "kube-api-access-8gj6t") pod "2e95db5e-31b2-40c8-b4f9-47517eb09890" (UID: "2e95db5e-31b2-40c8-b4f9-47517eb09890"). InnerVolumeSpecName "kube-api-access-8gj6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.403985 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e95db5e-31b2-40c8-b4f9-47517eb09890" (UID: "2e95db5e-31b2-40c8-b4f9-47517eb09890"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.490427 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.490457 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gj6t\" (UniqueName: \"kubernetes.io/projected/2e95db5e-31b2-40c8-b4f9-47517eb09890-kube-api-access-8gj6t\") on node \"crc\" DevicePath \"\"" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.490468 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e95db5e-31b2-40c8-b4f9-47517eb09890-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.855093 5055 generic.go:334] "Generic (PLEG): container finished" podID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerID="fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424" exitCode=0 Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.855136 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerDied","Data":"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424"} Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.855152 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qk9g" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.855160 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qk9g" event={"ID":"2e95db5e-31b2-40c8-b4f9-47517eb09890","Type":"ContainerDied","Data":"12d7924a30e7041e1ef6e9a712d2a8c6d5270f2a4e74bd341365a4256ddaf8ef"} Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.855186 5055 scope.go:117] "RemoveContainer" containerID="fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.883433 5055 scope.go:117] "RemoveContainer" containerID="f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.892283 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.904304 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qk9g"] Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.917519 5055 scope.go:117] "RemoveContainer" containerID="a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.932725 5055 scope.go:117] "RemoveContainer" containerID="fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424" Oct 11 07:38:39 crc kubenswrapper[5055]: E1011 07:38:39.933442 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424\": container with ID starting with fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424 not found: ID does not exist" containerID="fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.933522 5055 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424"} err="failed to get container status \"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424\": rpc error: code = NotFound desc = could not find container \"fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424\": container with ID starting with fe76768c9de5ea85374acc710fd4fdc1c87d9f20f7f17eada85c70a760912424 not found: ID does not exist" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.933548 5055 scope.go:117] "RemoveContainer" containerID="f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a" Oct 11 07:38:39 crc kubenswrapper[5055]: E1011 07:38:39.933921 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a\": container with ID starting with f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a not found: ID does not exist" containerID="f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.933948 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a"} err="failed to get container status \"f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a\": rpc error: code = NotFound desc = could not find container \"f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a\": container with ID starting with f2c845d247fc98941f48d73e3ff6bf817e373b25a21f08d6b35f129e0619125a not found: ID does not exist" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.933963 5055 scope.go:117] "RemoveContainer" containerID="a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47" Oct 11 07:38:39 crc kubenswrapper[5055]: E1011 07:38:39.934384 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47\": container with ID starting with a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47 not found: ID does not exist" containerID="a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47" Oct 11 07:38:39 crc kubenswrapper[5055]: I1011 07:38:39.934414 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47"} err="failed to get container status \"a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47\": rpc error: code = NotFound desc = could not find container \"a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47\": container with ID starting with a1055f11ca8efed24b4fac28e8fbf5797a39780f3fd7f15a392d76d8f4783c47 not found: ID does not exist" Oct 11 07:38:41 crc kubenswrapper[5055]: I1011 07:38:41.004868 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" path="/var/lib/kubelet/pods/2e95db5e-31b2-40c8-b4f9-47517eb09890/volumes" Oct 11 07:40:32 crc kubenswrapper[5055]: I1011 07:40:32.422552 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:40:32 crc kubenswrapper[5055]: I1011 07:40:32.423418 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:41:02 crc kubenswrapper[5055]: I1011 07:41:02.421989 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:41:02 crc kubenswrapper[5055]: I1011 07:41:02.422683 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:41:32 crc kubenswrapper[5055]: I1011 07:41:32.422116 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:41:32 crc kubenswrapper[5055]: I1011 07:41:32.422575 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:41:32 crc kubenswrapper[5055]: I1011 07:41:32.422622 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:41:32 crc kubenswrapper[5055]: I1011 07:41:32.423534 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:41:32 crc kubenswrapper[5055]: I1011 07:41:32.423620 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" gracePeriod=600 Oct 11 07:41:32 crc kubenswrapper[5055]: E1011 07:41:32.548407 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:41:33 crc kubenswrapper[5055]: I1011 07:41:33.138685 5055 
generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" exitCode=0 Oct 11 07:41:33 crc kubenswrapper[5055]: I1011 07:41:33.138732 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71"} Oct 11 07:41:33 crc kubenswrapper[5055]: I1011 07:41:33.138835 5055 scope.go:117] "RemoveContainer" containerID="c0e12d8d61d591609af06c7fe5dfe433891df5e49ea5314db0e55df41859b6f1" Oct 11 07:41:33 crc kubenswrapper[5055]: I1011 07:41:33.139321 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:41:33 crc kubenswrapper[5055]: E1011 07:41:33.139545 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:41:45 crc kubenswrapper[5055]: I1011 07:41:45.993618 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:41:45 crc kubenswrapper[5055]: E1011 07:41:45.994307 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:41:56 crc kubenswrapper[5055]: I1011 07:41:56.997279 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:41:56 crc kubenswrapper[5055]: E1011 07:41:56.998104 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:42:07 crc kubenswrapper[5055]: I1011 07:42:07.993507 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:42:07 crc kubenswrapper[5055]: E1011 07:42:07.995877 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.366948 5055 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:20 crc kubenswrapper[5055]: E1011 07:42:20.367675 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="extract-utilities" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.367687 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="extract-utilities" Oct 11 07:42:20 crc kubenswrapper[5055]: E1011 07:42:20.367718 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="extract-content" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.367744 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="extract-content" Oct 11 07:42:20 crc kubenswrapper[5055]: E1011 07:42:20.367783 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="registry-server" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.367790 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="registry-server" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.367931 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e95db5e-31b2-40c8-b4f9-47517eb09890" containerName="registry-server" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.368903 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.393969 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.394082 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msj4q\" (UniqueName: \"kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.394403 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.399300 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.495205 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.495251 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.495273 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msj4q\" (UniqueName: \"kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.495699 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.495825 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.525900 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msj4q\" (UniqueName: \"kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q\") pod \"certified-operators-w2f2d\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.698713 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:20 crc kubenswrapper[5055]: I1011 07:42:20.995663 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:42:20 crc kubenswrapper[5055]: E1011 07:42:20.996020 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:42:21 crc kubenswrapper[5055]: I1011 07:42:21.164960 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:21 crc kubenswrapper[5055]: I1011 07:42:21.521192 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerID="c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472" exitCode=0 Oct 11 07:42:21 crc kubenswrapper[5055]: I1011 07:42:21.521262 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerDied","Data":"c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472"} Oct 11 07:42:21 crc kubenswrapper[5055]: I1011 07:42:21.521591 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerStarted","Data":"439bd35ce836590afecc53005674bd1724169b7d262d295ed912c3abbf448661"} Oct 11 07:42:22 crc kubenswrapper[5055]: I1011 07:42:22.538622 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerStarted","Data":"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b"} Oct 11 07:42:23 crc kubenswrapper[5055]: I1011 07:42:23.548242 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerID="0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b" exitCode=0 Oct 11 07:42:23 crc kubenswrapper[5055]: I1011 07:42:23.548280 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerDied","Data":"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b"} Oct 11 07:42:24 crc kubenswrapper[5055]: I1011 07:42:24.555802 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerStarted","Data":"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5"} Oct 11 07:42:24 crc kubenswrapper[5055]: I1011 07:42:24.570610 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w2f2d" podStartSLOduration=2.135645742 podStartE2EDuration="4.570594575s" podCreationTimestamp="2025-10-11 07:42:20 +0000 UTC" firstStartedPulling="2025-10-11 07:42:21.522548207 +0000 UTC m=+2925.296822014" lastFinishedPulling="2025-10-11 07:42:23.95749704 +0000 UTC m=+2927.731770847" observedRunningTime="2025-10-11 
07:42:24.570165543 +0000 UTC m=+2928.344439350" watchObservedRunningTime="2025-10-11 07:42:24.570594575 +0000 UTC m=+2928.344868382" Oct 11 07:42:30 crc kubenswrapper[5055]: I1011 07:42:30.699560 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:30 crc kubenswrapper[5055]: I1011 07:42:30.700156 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:30 crc kubenswrapper[5055]: I1011 07:42:30.738043 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:31 crc kubenswrapper[5055]: I1011 07:42:31.645649 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:31 crc kubenswrapper[5055]: I1011 07:42:31.699220 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:32 crc kubenswrapper[5055]: I1011 07:42:32.993672 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:42:32 crc kubenswrapper[5055]: E1011 07:42:32.994207 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:42:33 crc kubenswrapper[5055]: I1011 07:42:33.613283 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w2f2d" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="registry-server" containerID="cri-o://7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5" gracePeriod=2 Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.014461 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.215608 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msj4q\" (UniqueName: \"kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q\") pod \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.215660 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities\") pod \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.215722 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content\") pod \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\" (UID: \"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c\") " Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.217563 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities" (OuterVolumeSpecName: "utilities") pod "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" (UID: "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.226939 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q" (OuterVolumeSpecName: "kube-api-access-msj4q") pod "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" (UID: "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c"). InnerVolumeSpecName "kube-api-access-msj4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.260835 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" (UID: "b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.319320 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msj4q\" (UniqueName: \"kubernetes.io/projected/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-kube-api-access-msj4q\") on node \"crc\" DevicePath \"\"" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.319359 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.319369 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.623636 5055 generic.go:334] "Generic (PLEG): container finished" podID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerID="7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5" exitCode=0 Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.623681 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerDied","Data":"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5"} Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.623709 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w2f2d" event={"ID":"b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c","Type":"ContainerDied","Data":"439bd35ce836590afecc53005674bd1724169b7d262d295ed912c3abbf448661"} Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.623727 5055 scope.go:117] "RemoveContainer" containerID="7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.623730 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w2f2d" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.647066 5055 scope.go:117] "RemoveContainer" containerID="0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.672178 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.682715 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w2f2d"] Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.689807 5055 scope.go:117] "RemoveContainer" containerID="c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.712466 5055 scope.go:117] "RemoveContainer" containerID="7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5" Oct 11 07:42:34 crc kubenswrapper[5055]: E1011 07:42:34.712948 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5\": container with ID starting with 7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5 not found: ID does not exist" containerID="7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.713007 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5"} err="failed to get container status \"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5\": rpc error: code = NotFound desc = could not find container \"7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5\": container with ID starting with 7519e204de9a0810007a0811aa6ab927e7406c8394c0953ca0613cb20b76afd5 not found: ID does not exist" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.713040 5055 scope.go:117] "RemoveContainer" containerID="0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b" Oct 11 07:42:34 crc kubenswrapper[5055]: E1011 07:42:34.713604 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b\": container with ID starting with 0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b not found: ID does not exist" containerID="0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.713645 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b"} err="failed to get container status \"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b\": rpc error: code = NotFound desc = could not find container \"0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b\": container with ID starting with 0fc3ed0f8c4085395bc3b4da63da43d55966a6a6139e30ecd7bb446275c0319b not found: ID does not exist" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.713671 5055 scope.go:117] "RemoveContainer" containerID="c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472" Oct 11 07:42:34 crc kubenswrapper[5055]: E1011 07:42:34.714271 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472\": container with ID starting with c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472 not found: ID does not exist" containerID="c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472" Oct 11 07:42:34 crc kubenswrapper[5055]: I1011 07:42:34.714302 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472"} err="failed to get container status \"c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472\": rpc error: code = NotFound desc = could not find container \"c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472\": container with ID starting with c60f405f202c062a11907347691c3cd8d792d318797c7f8b788f9bbb47f00472 not found: ID does not exist" Oct 11 07:42:35 crc kubenswrapper[5055]: I1011 07:42:35.002145 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" path="/var/lib/kubelet/pods/b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c/volumes" Oct 11 07:42:45 crc kubenswrapper[5055]: I1011 07:42:45.993536 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:42:45 crc kubenswrapper[5055]: E1011 07:42:45.994704 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:43:00 crc kubenswrapper[5055]: I1011 07:43:00.993958 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:43:00 crc kubenswrapper[5055]: E1011 07:43:00.994723 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.806361 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:02 crc kubenswrapper[5055]: E1011 07:43:02.807066 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="extract-utilities" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.807079 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="extract-utilities" Oct 11 07:43:02 crc kubenswrapper[5055]: E1011 07:43:02.807089 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="registry-server" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.807096 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="registry-server" Oct 11 07:43:02 crc kubenswrapper[5055]: E1011 
07:43:02.807126 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="extract-content" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.807132 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="extract-content" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.807247 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2ce5c7f-10c1-442a-b1eb-b1e2d5d1d65c" containerName="registry-server" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.808355 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.828210 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.935464 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.935535 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrp5n\" (UniqueName: \"kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:02 crc kubenswrapper[5055]: I1011 07:43:02.935593 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.036443 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrp5n\" (UniqueName: \"kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.036509 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.036580 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.037122 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.037347 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.057585 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrp5n\" (UniqueName: \"kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n\") pod \"redhat-operators-zpdtt\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.130123 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.408349 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.827935 5055 generic.go:334] "Generic (PLEG): container finished" podID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerID="3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae" exitCode=0 Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.828011 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerDied","Data":"3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae"} Oct 11 07:43:03 crc kubenswrapper[5055]: I1011 07:43:03.828286 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerStarted","Data":"39d7172ee77385d2d928e4e1f56ad50ff9fec6ffce268ef3716c7d9685bd5d3b"} Oct 11 07:43:04 crc kubenswrapper[5055]: I1011 07:43:04.843637 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerStarted","Data":"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23"} Oct 11 07:43:05 crc kubenswrapper[5055]: I1011 07:43:05.851238 5055 generic.go:334] "Generic (PLEG): container finished" podID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerID="e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23" exitCode=0 Oct 11 07:43:05 crc kubenswrapper[5055]: I1011 07:43:05.851289 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerDied","Data":"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23"} Oct 11 07:43:06 crc kubenswrapper[5055]: I1011 07:43:06.859075 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerStarted","Data":"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205"} Oct 11 07:43:06 crc kubenswrapper[5055]: I1011 07:43:06.875027 5055 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zpdtt" podStartSLOduration=2.328466932 podStartE2EDuration="4.875008177s" podCreationTimestamp="2025-10-11 07:43:02 +0000 UTC" firstStartedPulling="2025-10-11 07:43:03.829650485 +0000 UTC m=+2967.603924292" lastFinishedPulling="2025-10-11 07:43:06.37619173 +0000 UTC m=+2970.150465537" observedRunningTime="2025-10-11 07:43:06.872489645 +0000 UTC m=+2970.646763462" watchObservedRunningTime="2025-10-11 07:43:06.875008177 +0000 UTC m=+2970.649281984" Oct 11 07:43:13 crc kubenswrapper[5055]: I1011 07:43:13.131137 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:13 crc kubenswrapper[5055]: I1011 07:43:13.131748 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:13 crc kubenswrapper[5055]: I1011 07:43:13.182562 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:13 crc kubenswrapper[5055]: I1011 07:43:13.938031 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:13 crc kubenswrapper[5055]: I1011 07:43:13.976599 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:14 crc kubenswrapper[5055]: I1011 07:43:14.993168 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:43:14 crc kubenswrapper[5055]: E1011 07:43:14.993642 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:43:15 crc kubenswrapper[5055]: I1011 07:43:15.912375 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zpdtt" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="registry-server" containerID="cri-o://c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205" gracePeriod=2 Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.800309 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.921840 5055 generic.go:334] "Generic (PLEG): container finished" podID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerID="c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205" exitCode=0 Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.921878 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerDied","Data":"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205"} Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.921920 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zpdtt" event={"ID":"bb7e995a-f311-4ac1-870a-a6ff18636ed5","Type":"ContainerDied","Data":"39d7172ee77385d2d928e4e1f56ad50ff9fec6ffce268ef3716c7d9685bd5d3b"} Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.921938 5055 scope.go:117] "RemoveContainer" containerID="c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.921934 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zpdtt" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.937022 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities\") pod \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.937347 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content\") pod \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.937461 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrp5n\" (UniqueName: \"kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n\") pod \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\" (UID: \"bb7e995a-f311-4ac1-870a-a6ff18636ed5\") " Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.937721 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities" (OuterVolumeSpecName: "utilities") pod "bb7e995a-f311-4ac1-870a-a6ff18636ed5" (UID: "bb7e995a-f311-4ac1-870a-a6ff18636ed5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.937951 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.938594 5055 scope.go:117] "RemoveContainer" containerID="e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.943062 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n" (OuterVolumeSpecName: "kube-api-access-zrp5n") pod "bb7e995a-f311-4ac1-870a-a6ff18636ed5" (UID: "bb7e995a-f311-4ac1-870a-a6ff18636ed5"). InnerVolumeSpecName "kube-api-access-zrp5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.976398 5055 scope.go:117] "RemoveContainer" containerID="3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.996785 5055 scope.go:117] "RemoveContainer" containerID="c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205" Oct 11 07:43:16 crc kubenswrapper[5055]: E1011 07:43:16.997167 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205\": container with ID starting with c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205 not found: ID does not exist" containerID="c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.997201 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205"} err="failed to get container status \"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205\": rpc error: code = NotFound desc = could not find container \"c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205\": container with ID starting with c32cf6909ddd63dac28abe7af7e3d6685d1522bb6caadb99220e6c4941d19205 not found: ID does not exist" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.997220 5055 scope.go:117] "RemoveContainer" containerID="e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23" Oct 11 07:43:16 crc kubenswrapper[5055]: E1011 07:43:16.997499 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23\": container with ID starting with e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23 not found: ID does not exist" containerID="e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.997517 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23"} err="failed to get container status \"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23\": rpc error: code = NotFound desc = could not find container \"e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23\": container with ID starting with 
e7c810e7f59cf5937fcacdb2fae049e230bcaa41e2269a69de6cdf49fc999b23 not found: ID does not exist" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.997529 5055 scope.go:117] "RemoveContainer" containerID="3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae" Oct 11 07:43:16 crc kubenswrapper[5055]: E1011 07:43:16.997965 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae\": container with ID starting with 3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae not found: ID does not exist" containerID="3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae" Oct 11 07:43:16 crc kubenswrapper[5055]: I1011 07:43:16.998004 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae"} err="failed to get container status \"3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae\": rpc error: code = NotFound desc = could not find container \"3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae\": container with ID starting with 3bfc0273bbdabd8553e298d205d88b2e6f5b6d9c12d27bd8c4f84ee6486575ae not found: ID does not exist" Oct 11 07:43:17 crc kubenswrapper[5055]: I1011 07:43:17.023560 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb7e995a-f311-4ac1-870a-a6ff18636ed5" (UID: "bb7e995a-f311-4ac1-870a-a6ff18636ed5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:43:17 crc kubenswrapper[5055]: I1011 07:43:17.039198 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7e995a-f311-4ac1-870a-a6ff18636ed5-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:43:17 crc kubenswrapper[5055]: I1011 07:43:17.039222 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrp5n\" (UniqueName: \"kubernetes.io/projected/bb7e995a-f311-4ac1-870a-a6ff18636ed5-kube-api-access-zrp5n\") on node \"crc\" DevicePath \"\"" Oct 11 07:43:17 crc kubenswrapper[5055]: I1011 07:43:17.261559 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:17 crc kubenswrapper[5055]: I1011 07:43:17.275102 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zpdtt"] Oct 11 07:43:17 crc kubenswrapper[5055]: E1011 07:43:17.377053 5055 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb7e995a_f311_4ac1_870a_a6ff18636ed5.slice\": RecentStats: unable to find data in memory cache]" Oct 11 07:43:19 crc kubenswrapper[5055]: I1011 07:43:19.001512 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" path="/var/lib/kubelet/pods/bb7e995a-f311-4ac1-870a-a6ff18636ed5/volumes" Oct 11 07:43:27 crc kubenswrapper[5055]: I1011 07:43:27.993710 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:43:27 crc kubenswrapper[5055]: E1011 07:43:27.994341 5055 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:43:38 crc kubenswrapper[5055]: I1011 07:43:38.994583 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:43:38 crc kubenswrapper[5055]: E1011 07:43:38.998190 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:43:51 crc kubenswrapper[5055]: I1011 07:43:51.993640 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:43:51 crc kubenswrapper[5055]: E1011 07:43:51.994401 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:44:03 crc kubenswrapper[5055]: I1011 07:44:03.994904 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:44:03 crc kubenswrapper[5055]: E1011 07:44:03.996426 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:44:18 crc kubenswrapper[5055]: I1011 07:44:18.995120 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:44:18 crc kubenswrapper[5055]: E1011 07:44:18.997459 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.566032 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:19 crc kubenswrapper[5055]: E1011 07:44:19.566619 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="extract-utilities" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.566632 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="extract-utilities" Oct 11 07:44:19 crc kubenswrapper[5055]: E1011 07:44:19.566642 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="extract-content" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.566649 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="extract-content" Oct 11 07:44:19 crc kubenswrapper[5055]: E1011 07:44:19.566660 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="registry-server" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.566667 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="registry-server" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.566894 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb7e995a-f311-4ac1-870a-a6ff18636ed5" containerName="registry-server" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.567874 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.574738 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.696844 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdkqj\" (UniqueName: \"kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.696986 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.697027 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.798246 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.798302 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.798357 5055 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdkqj\" (UniqueName: \"kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.798876 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.798928 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.831576 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdkqj\" (UniqueName: \"kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj\") pod \"community-operators-wvc8x\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:19 crc kubenswrapper[5055]: I1011 07:44:19.888123 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:20 crc kubenswrapper[5055]: I1011 07:44:20.398033 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:21 crc kubenswrapper[5055]: I1011 07:44:21.410449 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerID="cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16" exitCode=0 Oct 11 07:44:21 crc kubenswrapper[5055]: I1011 07:44:21.410511 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerDied","Data":"cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16"} Oct 11 07:44:21 crc kubenswrapper[5055]: I1011 07:44:21.410794 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerStarted","Data":"41a4fccf3e2917c509bf91ac89f6261b2ebb4621ad49c28e1e736148cc38a7af"} Oct 11 07:44:21 crc kubenswrapper[5055]: I1011 07:44:21.412318 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:44:22 crc kubenswrapper[5055]: I1011 07:44:22.421329 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerID="9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899" exitCode=0 Oct 11 07:44:22 crc kubenswrapper[5055]: I1011 07:44:22.421385 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerDied","Data":"9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899"} Oct 11 07:44:23 crc 
kubenswrapper[5055]: I1011 07:44:23.434011 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerStarted","Data":"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747"} Oct 11 07:44:23 crc kubenswrapper[5055]: I1011 07:44:23.451118 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wvc8x" podStartSLOduration=3.004190465 podStartE2EDuration="4.45110069s" podCreationTimestamp="2025-10-11 07:44:19 +0000 UTC" firstStartedPulling="2025-10-11 07:44:21.412039136 +0000 UTC m=+3045.186312943" lastFinishedPulling="2025-10-11 07:44:22.858949361 +0000 UTC m=+3046.633223168" observedRunningTime="2025-10-11 07:44:23.448335891 +0000 UTC m=+3047.222609698" watchObservedRunningTime="2025-10-11 07:44:23.45110069 +0000 UTC m=+3047.225374497" Oct 11 07:44:29 crc kubenswrapper[5055]: I1011 07:44:29.889291 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:29 crc kubenswrapper[5055]: I1011 07:44:29.889855 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:29 crc kubenswrapper[5055]: I1011 07:44:29.923354 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:30 crc kubenswrapper[5055]: I1011 07:44:30.532957 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:30 crc kubenswrapper[5055]: I1011 07:44:30.578165 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:31 crc kubenswrapper[5055]: I1011 07:44:31.994011 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:44:31 crc kubenswrapper[5055]: E1011 07:44:31.994326 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:44:32 crc kubenswrapper[5055]: I1011 07:44:32.509000 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wvc8x" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="registry-server" containerID="cri-o://6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747" gracePeriod=2 Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.428853 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.496370 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdkqj\" (UniqueName: \"kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj\") pod \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.496483 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities\") pod \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.496547 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content\") pod \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\" (UID: \"9cdab93d-3ce3-40e1-ba5e-c0554824a81c\") " Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.497365 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities" (OuterVolumeSpecName: "utilities") pod "9cdab93d-3ce3-40e1-ba5e-c0554824a81c" (UID: "9cdab93d-3ce3-40e1-ba5e-c0554824a81c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.508141 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj" (OuterVolumeSpecName: "kube-api-access-bdkqj") pod "9cdab93d-3ce3-40e1-ba5e-c0554824a81c" (UID: "9cdab93d-3ce3-40e1-ba5e-c0554824a81c"). InnerVolumeSpecName "kube-api-access-bdkqj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.535086 5055 generic.go:334] "Generic (PLEG): container finished" podID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerID="6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747" exitCode=0 Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.535135 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerDied","Data":"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747"} Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.535163 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wvc8x" event={"ID":"9cdab93d-3ce3-40e1-ba5e-c0554824a81c","Type":"ContainerDied","Data":"41a4fccf3e2917c509bf91ac89f6261b2ebb4621ad49c28e1e736148cc38a7af"} Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.535180 5055 scope.go:117] "RemoveContainer" containerID="6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.535327 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wvc8x" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.570943 5055 scope.go:117] "RemoveContainer" containerID="9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.598697 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.598744 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdkqj\" (UniqueName: \"kubernetes.io/projected/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-kube-api-access-bdkqj\") on node \"crc\" DevicePath \"\"" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.618805 5055 scope.go:117] "RemoveContainer" containerID="cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.621444 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cdab93d-3ce3-40e1-ba5e-c0554824a81c" (UID: "9cdab93d-3ce3-40e1-ba5e-c0554824a81c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.635017 5055 scope.go:117] "RemoveContainer" containerID="6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747" Oct 11 07:44:33 crc kubenswrapper[5055]: E1011 07:44:33.635456 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747\": container with ID starting with 6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747 not found: ID does not exist" containerID="6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.635502 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747"} err="failed to get container status \"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747\": rpc error: code = NotFound desc = could not find container \"6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747\": container with ID starting with 6ac6041d0c39b2f64714febde12edf529c597960947a4b8339434c48777ae747 not found: ID does not exist" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.635534 5055 scope.go:117] "RemoveContainer" containerID="9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899" Oct 11 07:44:33 crc kubenswrapper[5055]: E1011 07:44:33.635886 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899\": container with ID starting with 9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899 not found: ID does not exist" containerID="9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.635925 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899"} err="failed to get 
container status \"9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899\": rpc error: code = NotFound desc = could not find container \"9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899\": container with ID starting with 9921b246bd2f35126d60795f481e095b03933e3328bdc76bbe77d60c29c2c899 not found: ID does not exist" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.635961 5055 scope.go:117] "RemoveContainer" containerID="cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16" Oct 11 07:44:33 crc kubenswrapper[5055]: E1011 07:44:33.636311 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16\": container with ID starting with cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16 not found: ID does not exist" containerID="cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.636348 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16"} err="failed to get container status \"cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16\": rpc error: code = NotFound desc = could not find container \"cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16\": container with ID starting with cb623789bae3eb8317715f601bdccbf6a9ab8c90a7050584584b7fa63f987d16 not found: ID does not exist" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.700535 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdab93d-3ce3-40e1-ba5e-c0554824a81c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.873738 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:33 crc kubenswrapper[5055]: I1011 07:44:33.880153 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wvc8x"] Oct 11 07:44:35 crc kubenswrapper[5055]: I1011 07:44:35.005612 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" path="/var/lib/kubelet/pods/9cdab93d-3ce3-40e1-ba5e-c0554824a81c/volumes" Oct 11 07:44:45 crc kubenswrapper[5055]: I1011 07:44:45.994573 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:44:45 crc kubenswrapper[5055]: E1011 07:44:45.995426 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:44:57 crc kubenswrapper[5055]: I1011 07:44:57.994213 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:44:57 crc kubenswrapper[5055]: E1011 07:44:57.994933 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.166419 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc"] Oct 11 07:45:00 crc kubenswrapper[5055]: E1011 07:45:00.168353 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="extract-content" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.168399 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="extract-content" Oct 11 07:45:00 crc kubenswrapper[5055]: E1011 07:45:00.168418 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="extract-utilities" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.168425 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="extract-utilities" Oct 11 07:45:00 crc kubenswrapper[5055]: E1011 07:45:00.168439 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="registry-server" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.168445 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="registry-server" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.168596 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cdab93d-3ce3-40e1-ba5e-c0554824a81c" containerName="registry-server" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.169273 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.173093 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.173309 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.178800 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc"] Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.363111 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w284x\" (UniqueName: \"kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.363162 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.363188 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.464712 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w284x\" (UniqueName: \"kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.464798 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.464831 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.465726 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume\") pod 
\"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.472343 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.482859 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w284x\" (UniqueName: \"kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x\") pod \"collect-profiles-29336145-26zpc\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.488928 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:00 crc kubenswrapper[5055]: I1011 07:45:00.879145 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc"] Oct 11 07:45:01 crc kubenswrapper[5055]: I1011 07:45:01.733886 5055 generic.go:334] "Generic (PLEG): container finished" podID="bc52ecca-cec2-4a1f-a930-42c62690889d" containerID="2779cafc80e61a7ee9e5509c3da9e854b8a61bda938540417e72e449fe88c9df" exitCode=0 Oct 11 07:45:01 crc kubenswrapper[5055]: I1011 07:45:01.734196 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" event={"ID":"bc52ecca-cec2-4a1f-a930-42c62690889d","Type":"ContainerDied","Data":"2779cafc80e61a7ee9e5509c3da9e854b8a61bda938540417e72e449fe88c9df"} Oct 11 07:45:01 crc kubenswrapper[5055]: I1011 07:45:01.734229 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" event={"ID":"bc52ecca-cec2-4a1f-a930-42c62690889d","Type":"ContainerStarted","Data":"cc3adbfe4205e70351c8364b2f1cccc72edd94f7e05f964838c0974e64ce28fe"} Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.003197 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.107513 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w284x\" (UniqueName: \"kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x\") pod \"bc52ecca-cec2-4a1f-a930-42c62690889d\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.107624 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume\") pod \"bc52ecca-cec2-4a1f-a930-42c62690889d\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.107711 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume\") pod \"bc52ecca-cec2-4a1f-a930-42c62690889d\" (UID: \"bc52ecca-cec2-4a1f-a930-42c62690889d\") " Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.108494 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume" (OuterVolumeSpecName: "config-volume") pod "bc52ecca-cec2-4a1f-a930-42c62690889d" (UID: "bc52ecca-cec2-4a1f-a930-42c62690889d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.114080 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x" (OuterVolumeSpecName: "kube-api-access-w284x") pod "bc52ecca-cec2-4a1f-a930-42c62690889d" (UID: "bc52ecca-cec2-4a1f-a930-42c62690889d"). InnerVolumeSpecName "kube-api-access-w284x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.114125 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bc52ecca-cec2-4a1f-a930-42c62690889d" (UID: "bc52ecca-cec2-4a1f-a930-42c62690889d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.210879 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bc52ecca-cec2-4a1f-a930-42c62690889d-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.210964 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w284x\" (UniqueName: \"kubernetes.io/projected/bc52ecca-cec2-4a1f-a930-42c62690889d-kube-api-access-w284x\") on node \"crc\" DevicePath \"\"" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.210996 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bc52ecca-cec2-4a1f-a930-42c62690889d-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.752492 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" event={"ID":"bc52ecca-cec2-4a1f-a930-42c62690889d","Type":"ContainerDied","Data":"cc3adbfe4205e70351c8364b2f1cccc72edd94f7e05f964838c0974e64ce28fe"} Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.752556 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc3adbfe4205e70351c8364b2f1cccc72edd94f7e05f964838c0974e64ce28fe" Oct 11 07:45:03 crc kubenswrapper[5055]: I1011 07:45:03.752601 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc" Oct 11 07:45:04 crc kubenswrapper[5055]: I1011 07:45:04.091816 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2"] Oct 11 07:45:04 crc kubenswrapper[5055]: I1011 07:45:04.101324 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336100-5zjw2"] Oct 11 07:45:05 crc kubenswrapper[5055]: I1011 07:45:05.002235 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d42f8ea2-1177-4851-9a44-452ef9b701b9" path="/var/lib/kubelet/pods/d42f8ea2-1177-4851-9a44-452ef9b701b9/volumes" Oct 11 07:45:09 crc kubenswrapper[5055]: I1011 07:45:09.993531 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:45:09 crc kubenswrapper[5055]: E1011 07:45:09.994033 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:45:23 crc kubenswrapper[5055]: I1011 07:45:23.993614 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:45:23 crc kubenswrapper[5055]: E1011 07:45:23.994335 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:45:34 crc kubenswrapper[5055]: I1011 07:45:34.993682 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:45:34 crc kubenswrapper[5055]: E1011 07:45:34.994581 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:45:49 crc kubenswrapper[5055]: I1011 07:45:49.993131 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:45:49 crc kubenswrapper[5055]: E1011 07:45:49.993808 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:45:53 crc kubenswrapper[5055]: I1011 07:45:53.479333 5055 scope.go:117] "RemoveContainer" containerID="c261bc982101a5b94c8faab8960c5713e8b6646ffe1bcf3fde44a2fc00ff21ad" Oct 11 07:46:00 crc kubenswrapper[5055]: I1011 07:46:00.993914 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:46:00 crc kubenswrapper[5055]: E1011 07:46:00.995186 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:46:12 crc kubenswrapper[5055]: I1011 07:46:12.993623 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:46:12 crc kubenswrapper[5055]: E1011 07:46:12.994362 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:46:26 crc kubenswrapper[5055]: I1011 07:46:26.999223 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:46:27 crc kubenswrapper[5055]: E1011 07:46:26.999910 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:46:37 crc kubenswrapper[5055]: I1011 07:46:37.993568 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:46:38 crc kubenswrapper[5055]: I1011 07:46:38.485847 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907"} Oct 11 07:49:02 crc kubenswrapper[5055]: I1011 07:49:02.422579 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:49:02 crc kubenswrapper[5055]: I1011 07:49:02.423104 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:49:32 crc kubenswrapper[5055]: I1011 07:49:32.422162 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:49:32 crc kubenswrapper[5055]: I1011 07:49:32.422716 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.720991 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:33 crc kubenswrapper[5055]: E1011 07:49:33.721335 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc52ecca-cec2-4a1f-a930-42c62690889d" containerName="collect-profiles" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.721350 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc52ecca-cec2-4a1f-a930-42c62690889d" containerName="collect-profiles" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.721549 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc52ecca-cec2-4a1f-a930-42c62690889d" containerName="collect-profiles" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.722833 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.733255 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.849572 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.849687 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.849738 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrr4c\" (UniqueName: \"kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.950921 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.951002 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.951032 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrr4c\" (UniqueName: \"kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.951417 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.951603 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:33 crc kubenswrapper[5055]: I1011 07:49:33.975268 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vrr4c\" (UniqueName: \"kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c\") pod \"redhat-marketplace-w6nhp\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.042277 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.295129 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.764444 5055 generic.go:334] "Generic (PLEG): container finished" podID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerID="aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4" exitCode=0 Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.764621 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerDied","Data":"aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4"} Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.764677 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerStarted","Data":"902c6139b887b9801b99ec001e8f363718d945704836713874fe803c2a5ead05"} Oct 11 07:49:34 crc kubenswrapper[5055]: I1011 07:49:34.767589 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:49:35 crc kubenswrapper[5055]: I1011 07:49:35.778463 5055 generic.go:334] "Generic (PLEG): container finished" podID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerID="95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c" exitCode=0 Oct 11 07:49:35 crc kubenswrapper[5055]: I1011 07:49:35.778544 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerDied","Data":"95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c"} Oct 11 07:49:36 crc kubenswrapper[5055]: I1011 07:49:36.788447 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerStarted","Data":"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a"} Oct 11 07:49:36 crc kubenswrapper[5055]: I1011 07:49:36.802924 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-w6nhp" podStartSLOduration=2.369115445 podStartE2EDuration="3.802905259s" podCreationTimestamp="2025-10-11 07:49:33 +0000 UTC" firstStartedPulling="2025-10-11 07:49:34.766335956 +0000 UTC m=+3358.540609763" lastFinishedPulling="2025-10-11 07:49:36.20012577 +0000 UTC m=+3359.974399577" observedRunningTime="2025-10-11 07:49:36.802363313 +0000 UTC m=+3360.576637110" watchObservedRunningTime="2025-10-11 07:49:36.802905259 +0000 UTC m=+3360.577179066" Oct 11 07:49:44 crc kubenswrapper[5055]: I1011 07:49:44.042575 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:44 crc kubenswrapper[5055]: I1011 07:49:44.043260 5055 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:44 crc kubenswrapper[5055]: I1011 07:49:44.082023 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:44 crc kubenswrapper[5055]: I1011 07:49:44.891111 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:44 crc kubenswrapper[5055]: I1011 07:49:44.928982 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:46 crc kubenswrapper[5055]: I1011 07:49:46.860298 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w6nhp" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="registry-server" containerID="cri-o://744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a" gracePeriod=2 Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.231524 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.233521 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content\") pod \"ded35850-f77e-48ce-a6e2-927b6c82b11c\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.233584 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities\") pod \"ded35850-f77e-48ce-a6e2-927b6c82b11c\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.233651 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrr4c\" (UniqueName: \"kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c\") pod \"ded35850-f77e-48ce-a6e2-927b6c82b11c\" (UID: \"ded35850-f77e-48ce-a6e2-927b6c82b11c\") " Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.234573 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities" (OuterVolumeSpecName: "utilities") pod "ded35850-f77e-48ce-a6e2-927b6c82b11c" (UID: "ded35850-f77e-48ce-a6e2-927b6c82b11c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.240404 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c" (OuterVolumeSpecName: "kube-api-access-vrr4c") pod "ded35850-f77e-48ce-a6e2-927b6c82b11c" (UID: "ded35850-f77e-48ce-a6e2-927b6c82b11c"). InnerVolumeSpecName "kube-api-access-vrr4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.255424 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ded35850-f77e-48ce-a6e2-927b6c82b11c" (UID: "ded35850-f77e-48ce-a6e2-927b6c82b11c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.334707 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.334743 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ded35850-f77e-48ce-a6e2-927b6c82b11c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.334753 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrr4c\" (UniqueName: \"kubernetes.io/projected/ded35850-f77e-48ce-a6e2-927b6c82b11c-kube-api-access-vrr4c\") on node \"crc\" DevicePath \"\"" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.870334 5055 generic.go:334] "Generic (PLEG): container finished" podID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerID="744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a" exitCode=0 Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.870385 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerDied","Data":"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a"} Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.870422 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w6nhp" event={"ID":"ded35850-f77e-48ce-a6e2-927b6c82b11c","Type":"ContainerDied","Data":"902c6139b887b9801b99ec001e8f363718d945704836713874fe803c2a5ead05"} Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.870425 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w6nhp" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.870442 5055 scope.go:117] "RemoveContainer" containerID="744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.889351 5055 scope.go:117] "RemoveContainer" containerID="95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.908253 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.914646 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w6nhp"] Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.930473 5055 scope.go:117] "RemoveContainer" containerID="aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.948413 5055 scope.go:117] "RemoveContainer" containerID="744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a" Oct 11 07:49:47 crc kubenswrapper[5055]: E1011 07:49:47.948904 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a\": container with ID starting with 744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a not found: ID does not exist" containerID="744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.948955 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a"} err="failed to get container status \"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a\": rpc error: code = NotFound desc = could not find container \"744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a\": container with ID starting with 744891cb53832b4265f19b7b8880ccbcce5a791c5558e681309bd0ecd4aa857a not found: ID does not exist" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.948983 5055 scope.go:117] "RemoveContainer" containerID="95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c" Oct 11 07:49:47 crc kubenswrapper[5055]: E1011 07:49:47.949338 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c\": container with ID starting with 95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c not found: ID does not exist" containerID="95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.949365 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c"} err="failed to get container status \"95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c\": rpc error: code = NotFound desc = could not find container \"95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c\": container with ID starting with 95199e2351cce0a362f7963d5f08584714744a3dc68ef8fac3af2632dd7ec32c not found: ID does not exist" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.949378 5055 scope.go:117] "RemoveContainer" 
containerID="aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4" Oct 11 07:49:47 crc kubenswrapper[5055]: E1011 07:49:47.949622 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4\": container with ID starting with aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4 not found: ID does not exist" containerID="aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4" Oct 11 07:49:47 crc kubenswrapper[5055]: I1011 07:49:47.949676 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4"} err="failed to get container status \"aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4\": rpc error: code = NotFound desc = could not find container \"aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4\": container with ID starting with aeafac512255d7c493b0bb9874e3f05056e11935b73dc50b41ff627d17dd59f4 not found: ID does not exist" Oct 11 07:49:49 crc kubenswrapper[5055]: I1011 07:49:49.002746 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" path="/var/lib/kubelet/pods/ded35850-f77e-48ce-a6e2-927b6c82b11c/volumes" Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.422418 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.423380 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.423451 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.424259 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.424378 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907" gracePeriod=600 Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.983387 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907" exitCode=0 Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.983823 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907"} Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.983862 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa"} Oct 11 07:50:02 crc kubenswrapper[5055]: I1011 07:50:02.983904 5055 scope.go:117] "RemoveContainer" containerID="e4d7d1cd5d14d2406efa7ea9274bc3564206a9a67e9467416f91d25792835e71" Oct 11 07:52:02 crc kubenswrapper[5055]: I1011 07:52:02.421848 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:52:02 crc kubenswrapper[5055]: I1011 07:52:02.423619 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:52:32 crc kubenswrapper[5055]: I1011 07:52:32.422856 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:52:32 crc kubenswrapper[5055]: I1011 07:52:32.424237 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:53:02 crc kubenswrapper[5055]: I1011 07:53:02.422588 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 07:53:02 crc kubenswrapper[5055]: I1011 07:53:02.423139 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 07:53:02 crc kubenswrapper[5055]: I1011 07:53:02.423182 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 07:53:02 crc kubenswrapper[5055]: I1011 07:53:02.423795 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Oct 11 07:53:02 crc kubenswrapper[5055]: I1011 07:53:02.423860 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" gracePeriod=600 Oct 11 07:53:02 crc kubenswrapper[5055]: E1011 07:53:02.556429 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:53:03 crc kubenswrapper[5055]: I1011 07:53:03.368259 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" exitCode=0 Oct 11 07:53:03 crc kubenswrapper[5055]: I1011 07:53:03.368350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa"} Oct 11 07:53:03 crc kubenswrapper[5055]: I1011 07:53:03.368522 5055 scope.go:117] "RemoveContainer" containerID="93a94dda3cc01b820a409ac42e93a7ef2a8f3fb9c492bcc359fd663c90041907" Oct 11 07:53:03 crc kubenswrapper[5055]: I1011 07:53:03.369174 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:53:03 crc kubenswrapper[5055]: E1011 07:53:03.369508 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.064467 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:09 crc kubenswrapper[5055]: E1011 07:53:09.068906 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="registry-server" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.068957 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="registry-server" Oct 11 07:53:09 crc kubenswrapper[5055]: E1011 07:53:09.068968 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="extract-utilities" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.068975 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="extract-utilities" Oct 11 07:53:09 crc kubenswrapper[5055]: E1011 07:53:09.068994 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" 
containerName="extract-content" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.069000 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="extract-content" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.069167 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ded35850-f77e-48ce-a6e2-927b6c82b11c" containerName="registry-server" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.071388 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.072166 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.216635 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.216725 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94dk7\" (UniqueName: \"kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.216785 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.318498 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.318611 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.318679 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94dk7\" (UniqueName: \"kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.319368 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " 
pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.319461 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.344608 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94dk7\" (UniqueName: \"kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7\") pod \"certified-operators-m4p8t\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.404016 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:09 crc kubenswrapper[5055]: I1011 07:53:09.878951 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:10 crc kubenswrapper[5055]: I1011 07:53:10.416887 5055 generic.go:334] "Generic (PLEG): container finished" podID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerID="bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a" exitCode=0 Oct 11 07:53:10 crc kubenswrapper[5055]: I1011 07:53:10.416951 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerDied","Data":"bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a"} Oct 11 07:53:10 crc kubenswrapper[5055]: I1011 07:53:10.417131 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerStarted","Data":"47c911c4b9fcd41ab3b82c4f717dbd6f92aff6f6ee2b158b92d50f5cd67e5f84"} Oct 11 07:53:11 crc kubenswrapper[5055]: I1011 07:53:11.426486 5055 generic.go:334] "Generic (PLEG): container finished" podID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerID="92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138" exitCode=0 Oct 11 07:53:11 crc kubenswrapper[5055]: I1011 07:53:11.426539 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerDied","Data":"92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138"} Oct 11 07:53:12 crc kubenswrapper[5055]: I1011 07:53:12.433928 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerStarted","Data":"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded"} Oct 11 07:53:12 crc kubenswrapper[5055]: I1011 07:53:12.450922 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m4p8t" podStartSLOduration=2.060563088 podStartE2EDuration="3.450903121s" podCreationTimestamp="2025-10-11 07:53:09 +0000 UTC" firstStartedPulling="2025-10-11 07:53:10.418990921 +0000 UTC m=+3574.193264738" lastFinishedPulling="2025-10-11 07:53:11.809330964 +0000 UTC m=+3575.583604771" observedRunningTime="2025-10-11 07:53:12.449272544 +0000 UTC 
m=+3576.223546351" watchObservedRunningTime="2025-10-11 07:53:12.450903121 +0000 UTC m=+3576.225176928" Oct 11 07:53:16 crc kubenswrapper[5055]: I1011 07:53:16.998578 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:53:16 crc kubenswrapper[5055]: E1011 07:53:16.999127 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:53:19 crc kubenswrapper[5055]: I1011 07:53:19.404751 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:19 crc kubenswrapper[5055]: I1011 07:53:19.405203 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:19 crc kubenswrapper[5055]: I1011 07:53:19.454246 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:19 crc kubenswrapper[5055]: I1011 07:53:19.552423 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:19 crc kubenswrapper[5055]: I1011 07:53:19.692118 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.504360 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m4p8t" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="registry-server" containerID="cri-o://d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded" gracePeriod=2 Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.862893 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.998112 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content\") pod \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.998176 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94dk7\" (UniqueName: \"kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7\") pod \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.998210 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities\") pod \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\" (UID: \"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd\") " Oct 11 07:53:21 crc kubenswrapper[5055]: I1011 07:53:21.999294 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities" (OuterVolumeSpecName: "utilities") pod "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" (UID: "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.011162 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7" (OuterVolumeSpecName: "kube-api-access-94dk7") pod "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" (UID: "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd"). InnerVolumeSpecName "kube-api-access-94dk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.043988 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" (UID: "b0c16bdc-85f3-4efe-a34e-60c7a64d95cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.100179 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.100227 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94dk7\" (UniqueName: \"kubernetes.io/projected/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-kube-api-access-94dk7\") on node \"crc\" DevicePath \"\"" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.100239 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.515713 5055 generic.go:334] "Generic (PLEG): container finished" podID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerID="d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded" exitCode=0 Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.515756 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerDied","Data":"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded"} Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.515810 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m4p8t" event={"ID":"b0c16bdc-85f3-4efe-a34e-60c7a64d95cd","Type":"ContainerDied","Data":"47c911c4b9fcd41ab3b82c4f717dbd6f92aff6f6ee2b158b92d50f5cd67e5f84"} Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.515829 5055 scope.go:117] "RemoveContainer" containerID="d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.515840 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m4p8t" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.544007 5055 scope.go:117] "RemoveContainer" containerID="92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.550932 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.557453 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m4p8t"] Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.571164 5055 scope.go:117] "RemoveContainer" containerID="bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.587170 5055 scope.go:117] "RemoveContainer" containerID="d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded" Oct 11 07:53:22 crc kubenswrapper[5055]: E1011 07:53:22.587611 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded\": container with ID starting with d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded not found: ID does not exist" containerID="d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.587652 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded"} err="failed to get container status \"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded\": rpc error: code = NotFound desc = could not find container \"d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded\": container with ID starting with d461865b5a01b77b1869ba4ae8c9411d00f9d018437d7b82c4f3d15585441ded not found: ID does not exist" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.587679 5055 scope.go:117] "RemoveContainer" containerID="92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138" Oct 11 07:53:22 crc kubenswrapper[5055]: E1011 07:53:22.588007 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138\": container with ID starting with 92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138 not found: ID does not exist" containerID="92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.588043 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138"} err="failed to get container status \"92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138\": rpc error: code = NotFound desc = could not find container \"92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138\": container with ID starting with 92ea245cc718300ca427ef63cee38f1f768713e3af596b37983dd2965a0b1138 not found: ID does not exist" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.588068 5055 scope.go:117] "RemoveContainer" containerID="bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a" Oct 11 07:53:22 crc kubenswrapper[5055]: E1011 07:53:22.588318 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a\": container with ID starting with bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a not found: ID does not exist" containerID="bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a" Oct 11 07:53:22 crc kubenswrapper[5055]: I1011 07:53:22.588341 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a"} err="failed to get container status \"bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a\": rpc error: code = NotFound desc = could not find container \"bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a\": container with ID starting with bfec29d76adc73813c5b10d1dccf6fb51e7b08c8780355b4999cdc20a073cd6a not found: ID does not exist" Oct 11 07:53:23 crc kubenswrapper[5055]: I1011 07:53:23.011051 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" path="/var/lib/kubelet/pods/b0c16bdc-85f3-4efe-a34e-60c7a64d95cd/volumes" Oct 11 07:53:31 crc kubenswrapper[5055]: I1011 07:53:31.993679 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:53:31 crc kubenswrapper[5055]: E1011 07:53:31.994470 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:53:47 crc kubenswrapper[5055]: I1011 07:53:47.003998 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:53:47 crc kubenswrapper[5055]: E1011 07:53:47.006388 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:53:58 crc kubenswrapper[5055]: I1011 07:53:58.993333 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:53:58 crc kubenswrapper[5055]: E1011 07:53:58.994052 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:54:13 crc kubenswrapper[5055]: I1011 07:54:13.004356 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:54:13 crc kubenswrapper[5055]: E1011 07:54:13.005712 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:54:25 crc kubenswrapper[5055]: I1011 07:54:25.994058 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:54:25 crc kubenswrapper[5055]: E1011 07:54:25.995205 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:54:38 crc kubenswrapper[5055]: I1011 07:54:38.994019 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:54:38 crc kubenswrapper[5055]: E1011 07:54:38.994816 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:54:53 crc kubenswrapper[5055]: I1011 07:54:53.993221 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:54:53 crc kubenswrapper[5055]: E1011 07:54:53.994349 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:55:04 crc kubenswrapper[5055]: I1011 07:55:04.993489 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:55:04 crc kubenswrapper[5055]: E1011 07:55:04.993936 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:55:16 crc kubenswrapper[5055]: I1011 07:55:16.997831 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:55:16 crc kubenswrapper[5055]: E1011 07:55:16.998465 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:55:30 crc kubenswrapper[5055]: I1011 07:55:30.993885 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:55:30 crc kubenswrapper[5055]: E1011 07:55:30.994601 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:55:41 crc kubenswrapper[5055]: I1011 07:55:41.993507 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:55:41 crc kubenswrapper[5055]: E1011 07:55:41.994302 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:55:54 crc kubenswrapper[5055]: I1011 07:55:54.995017 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:55:54 crc kubenswrapper[5055]: E1011 07:55:54.996939 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:56:05 crc kubenswrapper[5055]: I1011 07:56:05.993289 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:56:05 crc kubenswrapper[5055]: E1011 07:56:05.994047 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:56:18 crc kubenswrapper[5055]: I1011 07:56:18.993066 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:56:18 crc kubenswrapper[5055]: E1011 07:56:18.995048 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:56:30 crc kubenswrapper[5055]: I1011 07:56:30.994164 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:56:30 crc kubenswrapper[5055]: E1011 07:56:30.995037 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:56:41 crc kubenswrapper[5055]: I1011 07:56:41.993652 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:56:41 crc kubenswrapper[5055]: E1011 07:56:41.995393 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:56:56 crc kubenswrapper[5055]: I1011 07:56:56.996686 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:56:56 crc kubenswrapper[5055]: E1011 07:56:56.997522 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:57:08 crc kubenswrapper[5055]: I1011 07:57:08.996699 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:57:08 crc kubenswrapper[5055]: E1011 07:57:08.997641 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:57:22 crc kubenswrapper[5055]: I1011 07:57:22.995184 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:57:22 crc kubenswrapper[5055]: E1011 07:57:22.995838 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:57:34 crc kubenswrapper[5055]: I1011 07:57:34.993531 5055 scope.go:117] "RemoveContainer" 
containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:57:34 crc kubenswrapper[5055]: E1011 07:57:34.995377 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.827913 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:42 crc kubenswrapper[5055]: E1011 07:57:42.828699 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="extract-content" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.828711 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="extract-content" Oct 11 07:57:42 crc kubenswrapper[5055]: E1011 07:57:42.828728 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="extract-utilities" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.828735 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="extract-utilities" Oct 11 07:57:42 crc kubenswrapper[5055]: E1011 07:57:42.828780 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="registry-server" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.828790 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="registry-server" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.828939 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0c16bdc-85f3-4efe-a34e-60c7a64d95cd" containerName="registry-server" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.830178 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:42 crc kubenswrapper[5055]: I1011 07:57:42.849178 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.010852 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.010934 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.011012 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf5h9\" (UniqueName: \"kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.111935 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.112070 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf5h9\" (UniqueName: \"kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.112137 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.112702 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.112864 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.142875 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rf5h9\" (UniqueName: \"kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9\") pod \"community-operators-9lnpq\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.158588 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:43 crc kubenswrapper[5055]: I1011 07:57:43.399623 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:44 crc kubenswrapper[5055]: I1011 07:57:44.344261 5055 generic.go:334] "Generic (PLEG): container finished" podID="52f5645a-a244-4987-961b-4c22739d0b80" containerID="a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f" exitCode=0 Oct 11 07:57:44 crc kubenswrapper[5055]: I1011 07:57:44.344369 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerDied","Data":"a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f"} Oct 11 07:57:44 crc kubenswrapper[5055]: I1011 07:57:44.344641 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerStarted","Data":"111a767374427241459a5a13c86981720bcdad51aaf541fa6465dc2f63184e0c"} Oct 11 07:57:44 crc kubenswrapper[5055]: I1011 07:57:44.347606 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 07:57:46 crc kubenswrapper[5055]: I1011 07:57:46.363874 5055 generic.go:334] "Generic (PLEG): container finished" podID="52f5645a-a244-4987-961b-4c22739d0b80" containerID="76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a" exitCode=0 Oct 11 07:57:46 crc kubenswrapper[5055]: I1011 07:57:46.363959 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerDied","Data":"76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a"} Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.372538 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerStarted","Data":"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6"} Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.394131 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9lnpq" podStartSLOduration=2.964989959 podStartE2EDuration="5.394112945s" podCreationTimestamp="2025-10-11 07:57:42 +0000 UTC" firstStartedPulling="2025-10-11 07:57:44.347361078 +0000 UTC m=+3848.121634885" lastFinishedPulling="2025-10-11 07:57:46.776484044 +0000 UTC m=+3850.550757871" observedRunningTime="2025-10-11 07:57:47.392257833 +0000 UTC m=+3851.166531650" watchObservedRunningTime="2025-10-11 07:57:47.394112945 +0000 UTC m=+3851.168386742" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.630508 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.633046 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.649132 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.793001 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.793074 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.793159 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bsf4\" (UniqueName: \"kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.894682 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.894820 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bsf4\" (UniqueName: \"kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.894868 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.895350 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.895377 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.913664 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4bsf4\" (UniqueName: \"kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4\") pod \"redhat-operators-7bqdn\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:47 crc kubenswrapper[5055]: I1011 07:57:47.993287 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:48 crc kubenswrapper[5055]: I1011 07:57:48.442002 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:57:48 crc kubenswrapper[5055]: W1011 07:57:48.452926 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98388146_5e1a_4988_967b_0f5fec05e3d3.slice/crio-13110cc7d9fe258ef0f1b10292087404d73c1905a892253838963e6c206569b8 WatchSource:0}: Error finding container 13110cc7d9fe258ef0f1b10292087404d73c1905a892253838963e6c206569b8: Status 404 returned error can't find the container with id 13110cc7d9fe258ef0f1b10292087404d73c1905a892253838963e6c206569b8 Oct 11 07:57:49 crc kubenswrapper[5055]: I1011 07:57:49.385581 5055 generic.go:334] "Generic (PLEG): container finished" podID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerID="20a4fe08d8b4985b8b34a218530e3fe2e81ae0fc007ab9da24f2a92cc45aa7dc" exitCode=0 Oct 11 07:57:49 crc kubenswrapper[5055]: I1011 07:57:49.385678 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerDied","Data":"20a4fe08d8b4985b8b34a218530e3fe2e81ae0fc007ab9da24f2a92cc45aa7dc"} Oct 11 07:57:49 crc kubenswrapper[5055]: I1011 07:57:49.385912 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerStarted","Data":"13110cc7d9fe258ef0f1b10292087404d73c1905a892253838963e6c206569b8"} Oct 11 07:57:49 crc kubenswrapper[5055]: I1011 07:57:49.994003 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:57:49 crc kubenswrapper[5055]: E1011 07:57:49.994541 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:57:50 crc kubenswrapper[5055]: I1011 07:57:50.394208 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerStarted","Data":"a56cdc0878fbd3bb8790f9f8f95ff21e31b250c977271364be5c7e9eb4ef2e47"} Oct 11 07:57:51 crc kubenswrapper[5055]: I1011 07:57:51.402458 5055 generic.go:334] "Generic (PLEG): container finished" podID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerID="a56cdc0878fbd3bb8790f9f8f95ff21e31b250c977271364be5c7e9eb4ef2e47" exitCode=0 Oct 11 07:57:51 crc kubenswrapper[5055]: I1011 07:57:51.402507 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" 
event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerDied","Data":"a56cdc0878fbd3bb8790f9f8f95ff21e31b250c977271364be5c7e9eb4ef2e47"} Oct 11 07:57:52 crc kubenswrapper[5055]: I1011 07:57:52.412523 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerStarted","Data":"67a31834abb7335e2e3684545ef06489533082bf73f813e5eaa000142b1c5970"} Oct 11 07:57:52 crc kubenswrapper[5055]: I1011 07:57:52.432275 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7bqdn" podStartSLOduration=3.022352784 podStartE2EDuration="5.432258584s" podCreationTimestamp="2025-10-11 07:57:47 +0000 UTC" firstStartedPulling="2025-10-11 07:57:49.386925177 +0000 UTC m=+3853.161198974" lastFinishedPulling="2025-10-11 07:57:51.796830967 +0000 UTC m=+3855.571104774" observedRunningTime="2025-10-11 07:57:52.432070929 +0000 UTC m=+3856.206344736" watchObservedRunningTime="2025-10-11 07:57:52.432258584 +0000 UTC m=+3856.206532391" Oct 11 07:57:53 crc kubenswrapper[5055]: I1011 07:57:53.159379 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:53 crc kubenswrapper[5055]: I1011 07:57:53.159443 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:53 crc kubenswrapper[5055]: I1011 07:57:53.202108 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:53 crc kubenswrapper[5055]: I1011 07:57:53.455951 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:55 crc kubenswrapper[5055]: I1011 07:57:55.614355 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:55 crc kubenswrapper[5055]: I1011 07:57:55.614604 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9lnpq" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="registry-server" containerID="cri-o://92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6" gracePeriod=2 Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.781047 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.921264 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rf5h9\" (UniqueName: \"kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9\") pod \"52f5645a-a244-4987-961b-4c22739d0b80\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.921581 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities\") pod \"52f5645a-a244-4987-961b-4c22739d0b80\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.921613 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content\") pod \"52f5645a-a244-4987-961b-4c22739d0b80\" (UID: \"52f5645a-a244-4987-961b-4c22739d0b80\") " Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.922322 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities" (OuterVolumeSpecName: "utilities") pod "52f5645a-a244-4987-961b-4c22739d0b80" (UID: "52f5645a-a244-4987-961b-4c22739d0b80"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.926992 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9" (OuterVolumeSpecName: "kube-api-access-rf5h9") pod "52f5645a-a244-4987-961b-4c22739d0b80" (UID: "52f5645a-a244-4987-961b-4c22739d0b80"). InnerVolumeSpecName "kube-api-access-rf5h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:57:56 crc kubenswrapper[5055]: I1011 07:57:56.971131 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52f5645a-a244-4987-961b-4c22739d0b80" (UID: "52f5645a-a244-4987-961b-4c22739d0b80"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.023069 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rf5h9\" (UniqueName: \"kubernetes.io/projected/52f5645a-a244-4987-961b-4c22739d0b80-kube-api-access-rf5h9\") on node \"crc\" DevicePath \"\"" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.023116 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.023129 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52f5645a-a244-4987-961b-4c22739d0b80-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.442972 5055 generic.go:334] "Generic (PLEG): container finished" podID="52f5645a-a244-4987-961b-4c22739d0b80" containerID="92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6" exitCode=0 Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.443022 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerDied","Data":"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6"} Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.443047 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9lnpq" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.443060 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9lnpq" event={"ID":"52f5645a-a244-4987-961b-4c22739d0b80","Type":"ContainerDied","Data":"111a767374427241459a5a13c86981720bcdad51aaf541fa6465dc2f63184e0c"} Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.443086 5055 scope.go:117] "RemoveContainer" containerID="92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.469136 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.474867 5055 scope.go:117] "RemoveContainer" containerID="76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.476421 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9lnpq"] Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.490919 5055 scope.go:117] "RemoveContainer" containerID="a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.515633 5055 scope.go:117] "RemoveContainer" containerID="92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6" Oct 11 07:57:57 crc kubenswrapper[5055]: E1011 07:57:57.516041 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6\": container with ID starting with 92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6 not found: ID does not exist" containerID="92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.516075 
5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6"} err="failed to get container status \"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6\": rpc error: code = NotFound desc = could not find container \"92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6\": container with ID starting with 92954b608b8c334fd7428f087fac94222466e6825a242e2eff4a60d725b522b6 not found: ID does not exist" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.516097 5055 scope.go:117] "RemoveContainer" containerID="76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a" Oct 11 07:57:57 crc kubenswrapper[5055]: E1011 07:57:57.516310 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a\": container with ID starting with 76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a not found: ID does not exist" containerID="76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.516331 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a"} err="failed to get container status \"76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a\": rpc error: code = NotFound desc = could not find container \"76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a\": container with ID starting with 76f71aaef1d0b9384989b23ae200867c688cdfd76e6f6944304273e0f822a27a not found: ID does not exist" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.516345 5055 scope.go:117] "RemoveContainer" containerID="a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f" Oct 11 07:57:57 crc kubenswrapper[5055]: E1011 07:57:57.516517 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f\": container with ID starting with a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f not found: ID does not exist" containerID="a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.516536 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f"} err="failed to get container status \"a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f\": rpc error: code = NotFound desc = could not find container \"a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f\": container with ID starting with a76dd7dd19b819625f4b906fd00605f36c5293e4f8f3b58f7d60fa8f2496242f not found: ID does not exist" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.993516 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:57 crc kubenswrapper[5055]: I1011 07:57:57.993570 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:58 crc kubenswrapper[5055]: I1011 07:57:58.042541 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:58 crc kubenswrapper[5055]: I1011 07:57:58.494325 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:57:59 crc kubenswrapper[5055]: I1011 07:57:59.002335 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52f5645a-a244-4987-961b-4c22739d0b80" path="/var/lib/kubelet/pods/52f5645a-a244-4987-961b-4c22739d0b80/volumes" Oct 11 07:58:00 crc kubenswrapper[5055]: I1011 07:58:00.013411 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:58:00 crc kubenswrapper[5055]: I1011 07:58:00.467242 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7bqdn" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="registry-server" containerID="cri-o://67a31834abb7335e2e3684545ef06489533082bf73f813e5eaa000142b1c5970" gracePeriod=2 Oct 11 07:58:00 crc kubenswrapper[5055]: I1011 07:58:00.993143 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:58:00 crc kubenswrapper[5055]: E1011 07:58:00.993541 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 07:58:01 crc kubenswrapper[5055]: I1011 07:58:01.474233 5055 generic.go:334] "Generic (PLEG): container finished" podID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerID="67a31834abb7335e2e3684545ef06489533082bf73f813e5eaa000142b1c5970" exitCode=0 Oct 11 07:58:01 crc kubenswrapper[5055]: I1011 07:58:01.474276 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerDied","Data":"67a31834abb7335e2e3684545ef06489533082bf73f813e5eaa000142b1c5970"} Oct 11 07:58:01 crc kubenswrapper[5055]: I1011 07:58:01.936203 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.088120 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities\") pod \"98388146-5e1a-4988-967b-0f5fec05e3d3\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.088310 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bsf4\" (UniqueName: \"kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4\") pod \"98388146-5e1a-4988-967b-0f5fec05e3d3\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.088384 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content\") pod \"98388146-5e1a-4988-967b-0f5fec05e3d3\" (UID: \"98388146-5e1a-4988-967b-0f5fec05e3d3\") " Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.089077 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities" (OuterVolumeSpecName: "utilities") pod "98388146-5e1a-4988-967b-0f5fec05e3d3" (UID: "98388146-5e1a-4988-967b-0f5fec05e3d3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.098781 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4" (OuterVolumeSpecName: "kube-api-access-4bsf4") pod "98388146-5e1a-4988-967b-0f5fec05e3d3" (UID: "98388146-5e1a-4988-967b-0f5fec05e3d3"). InnerVolumeSpecName "kube-api-access-4bsf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.172131 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98388146-5e1a-4988-967b-0f5fec05e3d3" (UID: "98388146-5e1a-4988-967b-0f5fec05e3d3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.189569 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.189619 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bsf4\" (UniqueName: \"kubernetes.io/projected/98388146-5e1a-4988-967b-0f5fec05e3d3-kube-api-access-4bsf4\") on node \"crc\" DevicePath \"\"" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.189629 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98388146-5e1a-4988-967b-0f5fec05e3d3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.482745 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7bqdn" event={"ID":"98388146-5e1a-4988-967b-0f5fec05e3d3","Type":"ContainerDied","Data":"13110cc7d9fe258ef0f1b10292087404d73c1905a892253838963e6c206569b8"} Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.482824 5055 scope.go:117] "RemoveContainer" containerID="67a31834abb7335e2e3684545ef06489533082bf73f813e5eaa000142b1c5970" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.482937 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7bqdn" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.531779 5055 scope.go:117] "RemoveContainer" containerID="a56cdc0878fbd3bb8790f9f8f95ff21e31b250c977271364be5c7e9eb4ef2e47" Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.534242 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.540439 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7bqdn"] Oct 11 07:58:02 crc kubenswrapper[5055]: I1011 07:58:02.552974 5055 scope.go:117] "RemoveContainer" containerID="20a4fe08d8b4985b8b34a218530e3fe2e81ae0fc007ab9da24f2a92cc45aa7dc" Oct 11 07:58:03 crc kubenswrapper[5055]: I1011 07:58:03.005640 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" path="/var/lib/kubelet/pods/98388146-5e1a-4988-967b-0f5fec05e3d3/volumes" Oct 11 07:58:12 crc kubenswrapper[5055]: I1011 07:58:12.994053 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 07:58:13 crc kubenswrapper[5055]: I1011 07:58:13.564463 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69"} Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.146533 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2"] Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147478 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147494 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147514 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="extract-content" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147520 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="extract-content" Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147534 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147610 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147626 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="extract-utilities" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147633 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="extract-utilities" Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147642 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="extract-content" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147650 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="extract-content" Oct 11 08:00:00 crc kubenswrapper[5055]: E1011 08:00:00.147664 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="extract-utilities" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147671 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="extract-utilities" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147848 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="98388146-5e1a-4988-967b-0f5fec05e3d3" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.147906 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="52f5645a-a244-4987-961b-4c22739d0b80" containerName="registry-server" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.148473 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.152586 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.152783 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.157898 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2"] Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.179284 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.179363 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvz9p\" (UniqueName: \"kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.179402 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.281050 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.281107 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvz9p\" (UniqueName: \"kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.281228 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.282547 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume\") pod 
\"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.289113 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.297188 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvz9p\" (UniqueName: \"kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p\") pod \"collect-profiles-29336160-zdtp2\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.473644 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:00 crc kubenswrapper[5055]: I1011 08:00:00.872716 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2"] Oct 11 08:00:01 crc kubenswrapper[5055]: I1011 08:00:01.294095 5055 generic.go:334] "Generic (PLEG): container finished" podID="38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" containerID="0a1b5bd95ead33c60c698b396fbc96742f6367a2d9b80f265308f54acf50c108" exitCode=0 Oct 11 08:00:01 crc kubenswrapper[5055]: I1011 08:00:01.294208 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" event={"ID":"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac","Type":"ContainerDied","Data":"0a1b5bd95ead33c60c698b396fbc96742f6367a2d9b80f265308f54acf50c108"} Oct 11 08:00:01 crc kubenswrapper[5055]: I1011 08:00:01.294668 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" event={"ID":"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac","Type":"ContainerStarted","Data":"f7131110b1a0ccb137ccc06b1e53922624e68077d5248323abeeac1ce934dc91"} Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.662500 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.820085 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvz9p\" (UniqueName: \"kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p\") pod \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.820201 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume\") pod \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.820227 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume\") pod \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\" (UID: \"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac\") " Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.820939 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume" (OuterVolumeSpecName: "config-volume") pod "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" (UID: "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.825233 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p" (OuterVolumeSpecName: "kube-api-access-rvz9p") pod "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" (UID: "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac"). InnerVolumeSpecName "kube-api-access-rvz9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.825268 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" (UID: "38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.921312 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.921347 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:00:02 crc kubenswrapper[5055]: I1011 08:00:02.921357 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvz9p\" (UniqueName: \"kubernetes.io/projected/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac-kube-api-access-rvz9p\") on node \"crc\" DevicePath \"\"" Oct 11 08:00:03 crc kubenswrapper[5055]: I1011 08:00:03.307618 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" event={"ID":"38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac","Type":"ContainerDied","Data":"f7131110b1a0ccb137ccc06b1e53922624e68077d5248323abeeac1ce934dc91"} Oct 11 08:00:03 crc kubenswrapper[5055]: I1011 08:00:03.307662 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7131110b1a0ccb137ccc06b1e53922624e68077d5248323abeeac1ce934dc91" Oct 11 08:00:03 crc kubenswrapper[5055]: I1011 08:00:03.307667 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2" Oct 11 08:00:03 crc kubenswrapper[5055]: I1011 08:00:03.743590 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72"] Oct 11 08:00:03 crc kubenswrapper[5055]: I1011 08:00:03.749250 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336115-fns72"] Oct 11 08:00:05 crc kubenswrapper[5055]: I1011 08:00:05.001922 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a67a7ab-47f4-4dc8-a49e-249233f62627" path="/var/lib/kubelet/pods/0a67a7ab-47f4-4dc8-a49e-249233f62627/volumes" Oct 11 08:00:32 crc kubenswrapper[5055]: I1011 08:00:32.422569 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:00:32 crc kubenswrapper[5055]: I1011 08:00:32.423530 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:00:53 crc kubenswrapper[5055]: I1011 08:00:53.748097 5055 scope.go:117] "RemoveContainer" containerID="8c416bca97c5a5cb8a7638a9c0d02e9ef2dba2faabf4aceaa45dfa2911788b14" Oct 11 08:00:56 crc kubenswrapper[5055]: I1011 08:00:56.918662 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:00:56 crc kubenswrapper[5055]: E1011 08:00:56.919702 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" 
containerName="collect-profiles" Oct 11 08:00:56 crc kubenswrapper[5055]: I1011 08:00:56.919757 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" containerName="collect-profiles" Oct 11 08:00:56 crc kubenswrapper[5055]: I1011 08:00:56.920168 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" containerName="collect-profiles" Oct 11 08:00:56 crc kubenswrapper[5055]: I1011 08:00:56.923996 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:56 crc kubenswrapper[5055]: I1011 08:00:56.924558 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.092883 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.093216 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.093298 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng7nr\" (UniqueName: \"kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.194616 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.194671 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.194740 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng7nr\" (UniqueName: \"kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.195227 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " 
pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.195246 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.212520 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng7nr\" (UniqueName: \"kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr\") pod \"redhat-marketplace-dwgpf\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.258349 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.671421 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:00:57 crc kubenswrapper[5055]: I1011 08:00:57.705528 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerStarted","Data":"e75b1c5d469f420c18a36d225b0ee9bb4346b7cd8bdcbc9c865505d35100b760"} Oct 11 08:00:58 crc kubenswrapper[5055]: I1011 08:00:58.714458 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerID="5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0" exitCode=0 Oct 11 08:00:58 crc kubenswrapper[5055]: I1011 08:00:58.714575 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerDied","Data":"5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0"} Oct 11 08:00:59 crc kubenswrapper[5055]: I1011 08:00:59.723738 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerID="740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9" exitCode=0 Oct 11 08:00:59 crc kubenswrapper[5055]: I1011 08:00:59.723816 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerDied","Data":"740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9"} Oct 11 08:01:00 crc kubenswrapper[5055]: I1011 08:01:00.734810 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerStarted","Data":"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d"} Oct 11 08:01:00 crc kubenswrapper[5055]: I1011 08:01:00.753068 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dwgpf" podStartSLOduration=3.309975934 podStartE2EDuration="4.753040773s" podCreationTimestamp="2025-10-11 08:00:56 +0000 UTC" firstStartedPulling="2025-10-11 08:00:58.717460857 +0000 UTC m=+4042.491734664" lastFinishedPulling="2025-10-11 08:01:00.160525696 +0000 UTC m=+4043.934799503" observedRunningTime="2025-10-11 08:01:00.750416929 +0000 UTC m=+4044.524690736" 
watchObservedRunningTime="2025-10-11 08:01:00.753040773 +0000 UTC m=+4044.527314600" Oct 11 08:01:02 crc kubenswrapper[5055]: I1011 08:01:02.422258 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:01:02 crc kubenswrapper[5055]: I1011 08:01:02.422338 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:01:07 crc kubenswrapper[5055]: I1011 08:01:07.258832 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:07 crc kubenswrapper[5055]: I1011 08:01:07.259551 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:07 crc kubenswrapper[5055]: I1011 08:01:07.303669 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:07 crc kubenswrapper[5055]: I1011 08:01:07.823260 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:07 crc kubenswrapper[5055]: I1011 08:01:07.867198 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:01:09 crc kubenswrapper[5055]: I1011 08:01:09.802485 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dwgpf" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="registry-server" containerID="cri-o://26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d" gracePeriod=2 Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.245152 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.316379 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities\") pod \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.316436 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ng7nr\" (UniqueName: \"kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr\") pod \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.316533 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content\") pod \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\" (UID: \"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62\") " Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.317156 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities" (OuterVolumeSpecName: "utilities") pod "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" (UID: "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.321919 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr" (OuterVolumeSpecName: "kube-api-access-ng7nr") pod "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" (UID: "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62"). InnerVolumeSpecName "kube-api-access-ng7nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.330277 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" (UID: "c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.417902 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ng7nr\" (UniqueName: \"kubernetes.io/projected/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-kube-api-access-ng7nr\") on node \"crc\" DevicePath \"\"" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.417934 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.417942 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.810635 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerID="26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d" exitCode=0 Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.810678 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerDied","Data":"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d"} Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.810682 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwgpf" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.810708 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwgpf" event={"ID":"c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62","Type":"ContainerDied","Data":"e75b1c5d469f420c18a36d225b0ee9bb4346b7cd8bdcbc9c865505d35100b760"} Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.810730 5055 scope.go:117] "RemoveContainer" containerID="26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.830397 5055 scope.go:117] "RemoveContainer" containerID="740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.847733 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.853926 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwgpf"] Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.861495 5055 scope.go:117] "RemoveContainer" containerID="5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.879987 5055 scope.go:117] "RemoveContainer" containerID="26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d" Oct 11 08:01:10 crc kubenswrapper[5055]: E1011 08:01:10.880615 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d\": container with ID starting with 26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d not found: ID does not exist" containerID="26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.880659 5055 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d"} err="failed to get container status \"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d\": rpc error: code = NotFound desc = could not find container \"26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d\": container with ID starting with 26cf3dd536daf35133e2928b87ac64382ba94449c5ff774cf4516c7076751e9d not found: ID does not exist" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.880687 5055 scope.go:117] "RemoveContainer" containerID="740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9" Oct 11 08:01:10 crc kubenswrapper[5055]: E1011 08:01:10.881567 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9\": container with ID starting with 740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9 not found: ID does not exist" containerID="740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.881600 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9"} err="failed to get container status \"740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9\": rpc error: code = NotFound desc = could not find container \"740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9\": container with ID starting with 740a71f8204aff44563748ee129b8a958bafb1d8ef1fc85bcc4fbf9951d8b4b9 not found: ID does not exist" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.881624 5055 scope.go:117] "RemoveContainer" containerID="5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0" Oct 11 08:01:10 crc kubenswrapper[5055]: E1011 08:01:10.881973 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0\": container with ID starting with 5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0 not found: ID does not exist" containerID="5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0" Oct 11 08:01:10 crc kubenswrapper[5055]: I1011 08:01:10.881996 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0"} err="failed to get container status \"5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0\": rpc error: code = NotFound desc = could not find container \"5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0\": container with ID starting with 5166775df727eae10290ca9eb10814f8b996e14fe34e42c0d0dfca4674a629f0 not found: ID does not exist" Oct 11 08:01:11 crc kubenswrapper[5055]: I1011 08:01:11.001954 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" path="/var/lib/kubelet/pods/c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62/volumes" Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.422919 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.423375 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.423420 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.423928 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.423988 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69" gracePeriod=600 Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.956321 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69" exitCode=0 Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.956430 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69"} Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.956661 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487"} Oct 11 08:01:32 crc kubenswrapper[5055]: I1011 08:01:32.956684 5055 scope.go:117] "RemoveContainer" containerID="a4a1dd900d4a965384aa65adc020e54e0d2d99bbc26624ec9c6743f9609a6ffa" Oct 11 08:03:32 crc kubenswrapper[5055]: I1011 08:03:32.422657 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:03:32 crc kubenswrapper[5055]: I1011 08:03:32.423203 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:04:02 crc kubenswrapper[5055]: I1011 08:04:02.422690 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:04:02 crc kubenswrapper[5055]: I1011 08:04:02.423299 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.924256 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:22 crc kubenswrapper[5055]: E1011 08:04:22.925031 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="extract-utilities" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.925044 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="extract-utilities" Oct 11 08:04:22 crc kubenswrapper[5055]: E1011 08:04:22.925063 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="registry-server" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.925069 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="registry-server" Oct 11 08:04:22 crc kubenswrapper[5055]: E1011 08:04:22.925087 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="extract-content" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.925094 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="extract-content" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.925230 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4fceb74-8ceb-4cfa-aa49-1e5d6430bc62" containerName="registry-server" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.926236 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.945238 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.947284 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.947386 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssz6m\" (UniqueName: \"kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:22 crc kubenswrapper[5055]: I1011 08:04:22.947427 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.048854 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssz6m\" (UniqueName: \"kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.048912 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.048954 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.049421 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.049643 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.069606 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ssz6m\" (UniqueName: \"kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m\") pod \"certified-operators-8h9v8\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.255154 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:23 crc kubenswrapper[5055]: I1011 08:04:23.759504 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:24 crc kubenswrapper[5055]: I1011 08:04:24.176634 5055 generic.go:334] "Generic (PLEG): container finished" podID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerID="0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3" exitCode=0 Oct 11 08:04:24 crc kubenswrapper[5055]: I1011 08:04:24.176718 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerDied","Data":"0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3"} Oct 11 08:04:24 crc kubenswrapper[5055]: I1011 08:04:24.177011 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerStarted","Data":"2d1ede4b30973702039b24908c28daba43da848da15e80fde2ca52ea1373a769"} Oct 11 08:04:24 crc kubenswrapper[5055]: I1011 08:04:24.178931 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:04:25 crc kubenswrapper[5055]: I1011 08:04:25.184855 5055 generic.go:334] "Generic (PLEG): container finished" podID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerID="f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898" exitCode=0 Oct 11 08:04:25 crc kubenswrapper[5055]: I1011 08:04:25.184905 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerDied","Data":"f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898"} Oct 11 08:04:26 crc kubenswrapper[5055]: I1011 08:04:26.193040 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerStarted","Data":"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9"} Oct 11 08:04:26 crc kubenswrapper[5055]: I1011 08:04:26.213369 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8h9v8" podStartSLOduration=2.7316484819999998 podStartE2EDuration="4.213348571s" podCreationTimestamp="2025-10-11 08:04:22 +0000 UTC" firstStartedPulling="2025-10-11 08:04:24.178117714 +0000 UTC m=+4247.952391511" lastFinishedPulling="2025-10-11 08:04:25.659817793 +0000 UTC m=+4249.434091600" observedRunningTime="2025-10-11 08:04:26.208545005 +0000 UTC m=+4249.982818832" watchObservedRunningTime="2025-10-11 08:04:26.213348571 +0000 UTC m=+4249.987622378" Oct 11 08:04:32 crc kubenswrapper[5055]: I1011 08:04:32.422327 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:04:32 crc kubenswrapper[5055]: I1011 08:04:32.422962 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:04:32 crc kubenswrapper[5055]: I1011 08:04:32.423029 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:04:32 crc kubenswrapper[5055]: I1011 08:04:32.423992 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:04:32 crc kubenswrapper[5055]: I1011 08:04:32.424097 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" gracePeriod=600 Oct 11 08:04:32 crc kubenswrapper[5055]: E1011 08:04:32.546419 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.257894 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.261650 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.270287 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" exitCode=0 Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.270390 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487"} Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.270504 5055 scope.go:117] "RemoveContainer" containerID="a42d39e15f1d489d40476162a2ce89601697018b156c1d55f198798115254e69" Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.271578 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:04:33 crc kubenswrapper[5055]: E1011 08:04:33.272017 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:04:33 crc kubenswrapper[5055]: I1011 08:04:33.352528 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:34 crc kubenswrapper[5055]: I1011 08:04:34.327480 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:34 crc kubenswrapper[5055]: I1011 08:04:34.377786 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.293255 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8h9v8" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="registry-server" containerID="cri-o://43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9" gracePeriod=2 Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.677792 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.774881 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content\") pod \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.775000 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssz6m\" (UniqueName: \"kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m\") pod \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.775038 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities\") pod \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\" (UID: \"2deec4de-12c1-4baf-b3da-d3a8a067f65a\") " Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.776003 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities" (OuterVolumeSpecName: "utilities") pod "2deec4de-12c1-4baf-b3da-d3a8a067f65a" (UID: "2deec4de-12c1-4baf-b3da-d3a8a067f65a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.782721 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m" (OuterVolumeSpecName: "kube-api-access-ssz6m") pod "2deec4de-12c1-4baf-b3da-d3a8a067f65a" (UID: "2deec4de-12c1-4baf-b3da-d3a8a067f65a"). InnerVolumeSpecName "kube-api-access-ssz6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.820032 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2deec4de-12c1-4baf-b3da-d3a8a067f65a" (UID: "2deec4de-12c1-4baf-b3da-d3a8a067f65a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.876631 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.876668 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssz6m\" (UniqueName: \"kubernetes.io/projected/2deec4de-12c1-4baf-b3da-d3a8a067f65a-kube-api-access-ssz6m\") on node \"crc\" DevicePath \"\"" Oct 11 08:04:36 crc kubenswrapper[5055]: I1011 08:04:36.876681 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2deec4de-12c1-4baf-b3da-d3a8a067f65a-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.302396 5055 generic.go:334] "Generic (PLEG): container finished" podID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerID="43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9" exitCode=0 Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.302444 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerDied","Data":"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9"} Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.302475 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8h9v8" event={"ID":"2deec4de-12c1-4baf-b3da-d3a8a067f65a","Type":"ContainerDied","Data":"2d1ede4b30973702039b24908c28daba43da848da15e80fde2ca52ea1373a769"} Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.302493 5055 scope.go:117] "RemoveContainer" containerID="43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.302617 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8h9v8" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.326473 5055 scope.go:117] "RemoveContainer" containerID="f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.328546 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.334377 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8h9v8"] Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.348826 5055 scope.go:117] "RemoveContainer" containerID="0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.370231 5055 scope.go:117] "RemoveContainer" containerID="43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9" Oct 11 08:04:37 crc kubenswrapper[5055]: E1011 08:04:37.370712 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9\": container with ID starting with 43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9 not found: ID does not exist" containerID="43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.370781 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9"} err="failed to get container status \"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9\": rpc error: code = NotFound desc = could not find container \"43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9\": container with ID starting with 43e8ada7d96506a126f75d7c80c74c4e167984d0ce99ce07a4129f3d682e01c9 not found: ID does not exist" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.370814 5055 scope.go:117] "RemoveContainer" containerID="f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898" Oct 11 08:04:37 crc kubenswrapper[5055]: E1011 08:04:37.371115 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898\": container with ID starting with f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898 not found: ID does not exist" containerID="f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.371140 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898"} err="failed to get container status \"f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898\": rpc error: code = NotFound desc = could not find container \"f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898\": container with ID starting with f5d02e2a60d1bd8e166cb6c8e1cab84e62d082c213340b4bd032520fe71ba898 not found: ID does not exist" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.371155 5055 scope.go:117] "RemoveContainer" containerID="0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3" Oct 11 08:04:37 crc kubenswrapper[5055]: E1011 08:04:37.371317 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3\": container with ID starting with 0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3 not found: ID does not exist" containerID="0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3" Oct 11 08:04:37 crc kubenswrapper[5055]: I1011 08:04:37.371342 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3"} err="failed to get container status \"0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3\": rpc error: code = NotFound desc = could not find container \"0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3\": container with ID starting with 0040f4cb733d1134c9b6702688024a84ea413938440187978d4be4a7f83e09f3 not found: ID does not exist" Oct 11 08:04:39 crc kubenswrapper[5055]: I1011 08:04:39.008941 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" path="/var/lib/kubelet/pods/2deec4de-12c1-4baf-b3da-d3a8a067f65a/volumes" Oct 11 08:04:43 crc kubenswrapper[5055]: I1011 08:04:43.994406 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:04:43 crc kubenswrapper[5055]: E1011 08:04:43.994595 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:04:57 crc kubenswrapper[5055]: I1011 08:04:57.994636 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:04:57 crc kubenswrapper[5055]: E1011 08:04:57.995365 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:05:11 crc kubenswrapper[5055]: I1011 08:05:11.993996 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:05:11 crc kubenswrapper[5055]: E1011 08:05:11.996888 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:05:23 crc kubenswrapper[5055]: I1011 08:05:23.993337 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:05:23 crc kubenswrapper[5055]: E1011 08:05:23.994055 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:05:35 crc kubenswrapper[5055]: I1011 08:05:35.993390 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:05:35 crc kubenswrapper[5055]: E1011 08:05:35.994023 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:05:48 crc kubenswrapper[5055]: I1011 08:05:48.993473 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:05:48 crc kubenswrapper[5055]: E1011 08:05:48.994177 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:06:01 crc kubenswrapper[5055]: I1011 08:06:01.993193 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:06:01 crc kubenswrapper[5055]: E1011 08:06:01.993923 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:06:16 crc kubenswrapper[5055]: I1011 08:06:16.996651 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:06:16 crc kubenswrapper[5055]: E1011 08:06:16.997518 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:06:27 crc kubenswrapper[5055]: I1011 08:06:27.993552 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:06:27 crc kubenswrapper[5055]: E1011 08:06:27.994320 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:06:40 crc kubenswrapper[5055]: I1011 08:06:40.993365 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:06:40 crc kubenswrapper[5055]: E1011 08:06:40.994059 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:06:51 crc kubenswrapper[5055]: I1011 08:06:51.993041 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:06:51 crc kubenswrapper[5055]: E1011 08:06:51.993980 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:07:04 crc kubenswrapper[5055]: I1011 08:07:04.994939 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:07:04 crc kubenswrapper[5055]: E1011 08:07:04.995938 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:07:19 crc kubenswrapper[5055]: I1011 08:07:19.994037 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:07:19 crc kubenswrapper[5055]: E1011 08:07:19.994587 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:07:33 crc kubenswrapper[5055]: I1011 08:07:33.993120 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:07:33 crc kubenswrapper[5055]: E1011 08:07:33.993736 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:07:45 crc kubenswrapper[5055]: I1011 08:07:45.993428 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:07:45 crc kubenswrapper[5055]: E1011 08:07:45.994848 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:08:00 crc kubenswrapper[5055]: I1011 08:08:00.993971 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:08:00 crc kubenswrapper[5055]: E1011 08:08:00.994800 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:08:12 crc kubenswrapper[5055]: I1011 08:08:12.993085 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:08:12 crc kubenswrapper[5055]: E1011 08:08:12.993914 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:08:27 crc kubenswrapper[5055]: I1011 08:08:27.993715 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:08:27 crc kubenswrapper[5055]: E1011 08:08:27.994533 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:08:38 crc kubenswrapper[5055]: I1011 08:08:38.994000 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:08:38 crc kubenswrapper[5055]: E1011 08:08:38.994988 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:08:52 crc kubenswrapper[5055]: I1011 08:08:52.993563 5055 scope.go:117] "RemoveContainer" 
containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:08:52 crc kubenswrapper[5055]: E1011 08:08:52.994417 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:09:06 crc kubenswrapper[5055]: I1011 08:09:06.997015 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:09:06 crc kubenswrapper[5055]: E1011 08:09:06.999266 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:09:20 crc kubenswrapper[5055]: I1011 08:09:20.994174 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:09:20 crc kubenswrapper[5055]: E1011 08:09:20.994910 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:09:33 crc kubenswrapper[5055]: I1011 08:09:33.994090 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487" Oct 11 08:09:34 crc kubenswrapper[5055]: I1011 08:09:34.418090 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff"} Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.924377 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q6f6q"] Oct 11 08:10:28 crc kubenswrapper[5055]: E1011 08:10:28.925243 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="registry-server" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.925260 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="registry-server" Oct 11 08:10:28 crc kubenswrapper[5055]: E1011 08:10:28.925293 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="extract-utilities" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.925303 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="extract-utilities" Oct 11 08:10:28 crc kubenswrapper[5055]: E1011 08:10:28.925319 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="extract-content" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.925327 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="extract-content" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.925495 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="2deec4de-12c1-4baf-b3da-d3a8a067f65a" containerName="registry-server" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.926661 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:28 crc kubenswrapper[5055]: I1011 08:10:28.937198 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q6f6q"] Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.022290 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.022385 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l76ct\" (UniqueName: \"kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.022445 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.111980 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"] Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.113284 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9rp2z" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.123974 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124544 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124674 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcxft\" (UniqueName: \"kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124694 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124746 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124834 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l76ct\" (UniqueName: \"kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.124882 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.125104 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.130131 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"] Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.148161 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-l76ct\" (UniqueName: \"kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct\") pod \"community-operators-q6f6q\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") " pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.226085 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.226151 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcxft\" (UniqueName: \"kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.226209 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.226723 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.226908 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.257314 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.257617 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcxft\" (UniqueName: \"kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft\") pod \"redhat-operators-9rp2z\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") " pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.439518 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.572625 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q6f6q"]
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.727015 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"]
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.811964 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerStarted","Data":"17494c361d2a1ac76e120bf18426fbb456d7c1f31cdf64cab020f90e8d7bbb60"}
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.815289 5055 generic.go:334] "Generic (PLEG): container finished" podID="bedff40b-f794-4a94-93b7-7c32f7656278" containerID="02f49cf5d488c6cd7c2e1b7f99979b13da636a8e07bdc1fa5592a6089e615dbf" exitCode=0
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.815333 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerDied","Data":"02f49cf5d488c6cd7c2e1b7f99979b13da636a8e07bdc1fa5592a6089e615dbf"}
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.815355 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerStarted","Data":"1d6b988a34f958aac951638d94a5c88c471fda6785dcf732579ea85bc597c582"}
Oct 11 08:10:29 crc kubenswrapper[5055]: I1011 08:10:29.817117 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 11 08:10:30 crc kubenswrapper[5055]: I1011 08:10:30.823510 5055 generic.go:334] "Generic (PLEG): container finished" podID="bedff40b-f794-4a94-93b7-7c32f7656278" containerID="b4d1eb5cc021c1a514d424f91b9c3ba6dcbe4506499caff7fad9d1c27b014d89" exitCode=0
Oct 11 08:10:30 crc kubenswrapper[5055]: I1011 08:10:30.823557 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerDied","Data":"b4d1eb5cc021c1a514d424f91b9c3ba6dcbe4506499caff7fad9d1c27b014d89"}
Oct 11 08:10:30 crc kubenswrapper[5055]: I1011 08:10:30.825789 5055 generic.go:334] "Generic (PLEG): container finished" podID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerID="265dbe646aab8c3eab65faf416226887b3af1b87ce07bf4d70566318eae523ea" exitCode=0
Oct 11 08:10:30 crc kubenswrapper[5055]: I1011 08:10:30.825827 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerDied","Data":"265dbe646aab8c3eab65faf416226887b3af1b87ce07bf4d70566318eae523ea"}
Oct 11 08:10:31 crc kubenswrapper[5055]: I1011 08:10:31.842665 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerStarted","Data":"835bb5ef8da4534d04842ee66b88a430e2dd8f95170575bad09d3fddb7a6e814"}
Oct 11 08:10:31 crc kubenswrapper[5055]: I1011 08:10:31.845121 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerStarted","Data":"2266117472fcc5bcda4d05cea19a4c3c8e2f18699f29436dc46c8b2dd16405cb"}
Oct 11 08:10:31 crc kubenswrapper[5055]: I1011 08:10:31.892290 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q6f6q" podStartSLOduration=2.458857424 podStartE2EDuration="3.89227257s" podCreationTimestamp="2025-10-11 08:10:28 +0000 UTC" firstStartedPulling="2025-10-11 08:10:29.816650414 +0000 UTC m=+4613.590924221" lastFinishedPulling="2025-10-11 08:10:31.25006556 +0000 UTC m=+4615.024339367" observedRunningTime="2025-10-11 08:10:31.88734113 +0000 UTC m=+4615.661614937" watchObservedRunningTime="2025-10-11 08:10:31.89227257 +0000 UTC m=+4615.666546377"
Oct 11 08:10:32 crc kubenswrapper[5055]: I1011 08:10:32.854653 5055 generic.go:334] "Generic (PLEG): container finished" podID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerID="835bb5ef8da4534d04842ee66b88a430e2dd8f95170575bad09d3fddb7a6e814" exitCode=0
Oct 11 08:10:32 crc kubenswrapper[5055]: I1011 08:10:32.854712 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerDied","Data":"835bb5ef8da4534d04842ee66b88a430e2dd8f95170575bad09d3fddb7a6e814"}
Oct 11 08:10:33 crc kubenswrapper[5055]: I1011 08:10:33.866238 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerStarted","Data":"6037a635cd0ba40abf66c52144c6f473ea23e00cc71a0e1531d8744cfa8e181c"}
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.258429 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.259411 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.299107 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.314645 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9rp2z" podStartSLOduration=7.452811249 podStartE2EDuration="10.314628548s" podCreationTimestamp="2025-10-11 08:10:29 +0000 UTC" firstStartedPulling="2025-10-11 08:10:30.826793355 +0000 UTC m=+4614.601067162" lastFinishedPulling="2025-10-11 08:10:33.688610654 +0000 UTC m=+4617.462884461" observedRunningTime="2025-10-11 08:10:33.882114736 +0000 UTC m=+4617.656388553" watchObservedRunningTime="2025-10-11 08:10:39.314628548 +0000 UTC m=+4623.088902355"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.441052 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.441087 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.476077 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.967005 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9rp2z"
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9rp2z" Oct 11 08:10:39 crc kubenswrapper[5055]: I1011 08:10:39.972777 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q6f6q" Oct 11 08:10:40 crc kubenswrapper[5055]: I1011 08:10:40.934015 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"] Oct 11 08:10:41 crc kubenswrapper[5055]: I1011 08:10:41.938746 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9rp2z" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="registry-server" containerID="cri-o://6037a635cd0ba40abf66c52144c6f473ea23e00cc71a0e1531d8744cfa8e181c" gracePeriod=2 Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.330945 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q6f6q"] Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.331163 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q6f6q" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="registry-server" containerID="cri-o://2266117472fcc5bcda4d05cea19a4c3c8e2f18699f29436dc46c8b2dd16405cb" gracePeriod=2 Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.946618 5055 generic.go:334] "Generic (PLEG): container finished" podID="bedff40b-f794-4a94-93b7-7c32f7656278" containerID="2266117472fcc5bcda4d05cea19a4c3c8e2f18699f29436dc46c8b2dd16405cb" exitCode=0 Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.946803 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerDied","Data":"2266117472fcc5bcda4d05cea19a4c3c8e2f18699f29436dc46c8b2dd16405cb"} Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.950381 5055 generic.go:334] "Generic (PLEG): container finished" podID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerID="6037a635cd0ba40abf66c52144c6f473ea23e00cc71a0e1531d8744cfa8e181c" exitCode=0 Oct 11 08:10:42 crc kubenswrapper[5055]: I1011 08:10:42.950435 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerDied","Data":"6037a635cd0ba40abf66c52144c6f473ea23e00cc71a0e1531d8744cfa8e181c"} Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.210003 5055 util.go:48] "No ready sandbox for pod can be found. 
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.324228 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l76ct\" (UniqueName: \"kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct\") pod \"bedff40b-f794-4a94-93b7-7c32f7656278\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.324340 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content\") pod \"bedff40b-f794-4a94-93b7-7c32f7656278\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.324392 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities\") pod \"bedff40b-f794-4a94-93b7-7c32f7656278\" (UID: \"bedff40b-f794-4a94-93b7-7c32f7656278\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.325477 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities" (OuterVolumeSpecName: "utilities") pod "bedff40b-f794-4a94-93b7-7c32f7656278" (UID: "bedff40b-f794-4a94-93b7-7c32f7656278"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.334458 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct" (OuterVolumeSpecName: "kube-api-access-l76ct") pod "bedff40b-f794-4a94-93b7-7c32f7656278" (UID: "bedff40b-f794-4a94-93b7-7c32f7656278"). InnerVolumeSpecName "kube-api-access-l76ct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.375908 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bedff40b-f794-4a94-93b7-7c32f7656278" (UID: "bedff40b-f794-4a94-93b7-7c32f7656278"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.385187 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.425937 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content\") pod \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.425993 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities\") pod \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.426079 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxft\" (UniqueName: \"kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft\") pod \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\" (UID: \"6e5b2553-a3b6-441b-89b3-1e9583a4e44f\") "
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.426371 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l76ct\" (UniqueName: \"kubernetes.io/projected/bedff40b-f794-4a94-93b7-7c32f7656278-kube-api-access-l76ct\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.426385 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.426395 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bedff40b-f794-4a94-93b7-7c32f7656278-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.427683 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities" (OuterVolumeSpecName: "utilities") pod "6e5b2553-a3b6-441b-89b3-1e9583a4e44f" (UID: "6e5b2553-a3b6-441b-89b3-1e9583a4e44f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.429248 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft" (OuterVolumeSpecName: "kube-api-access-pcxft") pod "6e5b2553-a3b6-441b-89b3-1e9583a4e44f" (UID: "6e5b2553-a3b6-441b-89b3-1e9583a4e44f"). InnerVolumeSpecName "kube-api-access-pcxft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.510598 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e5b2553-a3b6-441b-89b3-1e9583a4e44f" (UID: "6e5b2553-a3b6-441b-89b3-1e9583a4e44f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.527434 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxft\" (UniqueName: \"kubernetes.io/projected/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-kube-api-access-pcxft\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.527472 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.527481 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e5b2553-a3b6-441b-89b3-1e9583a4e44f-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.959721 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q6f6q" event={"ID":"bedff40b-f794-4a94-93b7-7c32f7656278","Type":"ContainerDied","Data":"1d6b988a34f958aac951638d94a5c88c471fda6785dcf732579ea85bc597c582"}
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.959800 5055 scope.go:117] "RemoveContainer" containerID="2266117472fcc5bcda4d05cea19a4c3c8e2f18699f29436dc46c8b2dd16405cb"
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.959912 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q6f6q"
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.964752 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9rp2z" event={"ID":"6e5b2553-a3b6-441b-89b3-1e9583a4e44f","Type":"ContainerDied","Data":"17494c361d2a1ac76e120bf18426fbb456d7c1f31cdf64cab020f90e8d7bbb60"}
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.964817 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9rp2z"
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.977716 5055 scope.go:117] "RemoveContainer" containerID="b4d1eb5cc021c1a514d424f91b9c3ba6dcbe4506499caff7fad9d1c27b014d89"
Oct 11 08:10:43 crc kubenswrapper[5055]: I1011 08:10:43.999017 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q6f6q"]
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.005178 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q6f6q"]
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.010640 5055 scope.go:117] "RemoveContainer" containerID="02f49cf5d488c6cd7c2e1b7f99979b13da636a8e07bdc1fa5592a6089e615dbf"
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.013377 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"]
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.018276 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9rp2z"]
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.026642 5055 scope.go:117] "RemoveContainer" containerID="6037a635cd0ba40abf66c52144c6f473ea23e00cc71a0e1531d8744cfa8e181c"
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.041952 5055 scope.go:117] "RemoveContainer" containerID="835bb5ef8da4534d04842ee66b88a430e2dd8f95170575bad09d3fddb7a6e814"
Oct 11 08:10:44 crc kubenswrapper[5055]: I1011 08:10:44.055949 5055 scope.go:117] "RemoveContainer" containerID="265dbe646aab8c3eab65faf416226887b3af1b87ce07bf4d70566318eae523ea"
Oct 11 08:10:45 crc kubenswrapper[5055]: I1011 08:10:45.009206 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" path="/var/lib/kubelet/pods/6e5b2553-a3b6-441b-89b3-1e9583a4e44f/volumes"
Oct 11 08:10:45 crc kubenswrapper[5055]: I1011 08:10:45.010596 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" path="/var/lib/kubelet/pods/bedff40b-f794-4a94-93b7-7c32f7656278/volumes"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.789062 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.789989 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790006 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.790017 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="extract-utilities"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790026 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="extract-utilities"
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.790046 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="extract-utilities"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790077 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="extract-utilities"
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.790091 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="extract-content"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790099 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="extract-content"
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.790118 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="extract-content"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790125 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="extract-content"
Oct 11 08:11:14 crc kubenswrapper[5055]: E1011 08:11:14.790146 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790153 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790316 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="bedff40b-f794-4a94-93b7-7c32f7656278" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.790346 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e5b2553-a3b6-441b-89b3-1e9583a4e44f" containerName="registry-server"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.791455 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.811484 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.869273 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.869325 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.869370 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxdj2\" (UniqueName: \"kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.970451 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.970522 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.970578 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxdj2\" (UniqueName: \"kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.971135 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.971176 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:14 crc kubenswrapper[5055]: I1011 08:11:14.996425 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxdj2\" (UniqueName: \"kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2\") pod \"redhat-marketplace-fv5dz\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") " pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:15 crc kubenswrapper[5055]: I1011 08:11:15.110721 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:15 crc kubenswrapper[5055]: I1011 08:11:15.560613 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:16 crc kubenswrapper[5055]: I1011 08:11:16.190025 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerID="349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea" exitCode=0
Oct 11 08:11:16 crc kubenswrapper[5055]: I1011 08:11:16.190102 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerDied","Data":"349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea"}
Oct 11 08:11:16 crc kubenswrapper[5055]: I1011 08:11:16.190136 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerStarted","Data":"61c48a9da091e18adc1d104c702fbd5b878e4d5b4391aeea45d69fb7130efeec"}
Oct 11 08:11:18 crc kubenswrapper[5055]: I1011 08:11:18.204019 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerID="bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a" exitCode=0
Oct 11 08:11:18 crc kubenswrapper[5055]: I1011 08:11:18.204093 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerDied","Data":"bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a"}
Oct 11 08:11:19 crc kubenswrapper[5055]: I1011 08:11:19.214082 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerStarted","Data":"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"}
Oct 11 08:11:19 crc kubenswrapper[5055]: I1011 08:11:19.233659 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fv5dz" podStartSLOduration=2.775765843 podStartE2EDuration="5.233638288s" podCreationTimestamp="2025-10-11 08:11:14 +0000 UTC" firstStartedPulling="2025-10-11 08:11:16.192559931 +0000 UTC m=+4659.966833758" lastFinishedPulling="2025-10-11 08:11:18.650432396 +0000 UTC m=+4662.424706203" observedRunningTime="2025-10-11 08:11:19.231939149 +0000 UTC m=+4663.006212966" watchObservedRunningTime="2025-10-11 08:11:19.233638288 +0000 UTC m=+4663.007912095"
Oct 11 08:11:25 crc kubenswrapper[5055]: I1011 08:11:25.110981 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:25 crc kubenswrapper[5055]: I1011 08:11:25.112136 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:25 crc kubenswrapper[5055]: I1011 08:11:25.186094 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:25 crc kubenswrapper[5055]: I1011 08:11:25.321508 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:25 crc kubenswrapper[5055]: I1011 08:11:25.416443 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.266516 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fv5dz" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="registry-server" containerID="cri-o://ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2" gracePeriod=2
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.604816 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.655219 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxdj2\" (UniqueName: \"kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2\") pod \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") "
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.655306 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities\") pod \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") "
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.655331 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content\") pod \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\" (UID: \"ca09096e-8fb1-458b-bf23-86c9b8e30de6\") "
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.656433 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities" (OuterVolumeSpecName: "utilities") pod "ca09096e-8fb1-458b-bf23-86c9b8e30de6" (UID: "ca09096e-8fb1-458b-bf23-86c9b8e30de6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.660610 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2" (OuterVolumeSpecName: "kube-api-access-wxdj2") pod "ca09096e-8fb1-458b-bf23-86c9b8e30de6" (UID: "ca09096e-8fb1-458b-bf23-86c9b8e30de6"). InnerVolumeSpecName "kube-api-access-wxdj2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.667857 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ca09096e-8fb1-458b-bf23-86c9b8e30de6" (UID: "ca09096e-8fb1-458b-bf23-86c9b8e30de6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.771779 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxdj2\" (UniqueName: \"kubernetes.io/projected/ca09096e-8fb1-458b-bf23-86c9b8e30de6-kube-api-access-wxdj2\") on node \"crc\" DevicePath \"\""
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.771855 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 08:11:27 crc kubenswrapper[5055]: I1011 08:11:27.771870 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca09096e-8fb1-458b-bf23-86c9b8e30de6-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.273897 5055 generic.go:334] "Generic (PLEG): container finished" podID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerID="ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2" exitCode=0
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.273945 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerDied","Data":"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"}
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.273979 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fv5dz" event={"ID":"ca09096e-8fb1-458b-bf23-86c9b8e30de6","Type":"ContainerDied","Data":"61c48a9da091e18adc1d104c702fbd5b878e4d5b4391aeea45d69fb7130efeec"}
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.273998 5055 scope.go:117] "RemoveContainer" containerID="ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.274017 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fv5dz"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.293707 5055 scope.go:117] "RemoveContainer" containerID="bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.306674 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.314166 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fv5dz"]
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.329893 5055 scope.go:117] "RemoveContainer" containerID="349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.349561 5055 scope.go:117] "RemoveContainer" containerID="ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"
Oct 11 08:11:28 crc kubenswrapper[5055]: E1011 08:11:28.350039 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2\": container with ID starting with ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2 not found: ID does not exist" containerID="ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.350073 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2"} err="failed to get container status \"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2\": rpc error: code = NotFound desc = could not find container \"ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2\": container with ID starting with ce26fb4d12d09972edbd3345f4e967878034861e778e3220906dd84607e9b6e2 not found: ID does not exist"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.350095 5055 scope.go:117] "RemoveContainer" containerID="bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a"
Oct 11 08:11:28 crc kubenswrapper[5055]: E1011 08:11:28.350338 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a\": container with ID starting with bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a not found: ID does not exist" containerID="bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.350357 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a"} err="failed to get container status \"bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a\": rpc error: code = NotFound desc = could not find container \"bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a\": container with ID starting with bf23b41a95b79c9bdb33af32e4fef8e0e3835c28861d5b89228e8f691365439a not found: ID does not exist"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.350370 5055 scope.go:117] "RemoveContainer" containerID="349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea"
Oct 11 08:11:28 crc kubenswrapper[5055]: E1011 08:11:28.350655 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea\": container with ID starting with 349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea not found: ID does not exist" containerID="349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea"
Oct 11 08:11:28 crc kubenswrapper[5055]: I1011 08:11:28.350731 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea"} err="failed to get container status \"349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea\": rpc error: code = NotFound desc = could not find container \"349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea\": container with ID starting with 349609469ffb6bcf955c72194fdfcead1ca95b0120115f6ba646d739f7d14dea not found: ID does not exist"
Oct 11 08:11:28 crc kubenswrapper[5055]: E1011 08:11:28.433990 5055 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca09096e_8fb1_458b_bf23_86c9b8e30de6.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca09096e_8fb1_458b_bf23_86c9b8e30de6.slice/crio-61c48a9da091e18adc1d104c702fbd5b878e4d5b4391aeea45d69fb7130efeec\": RecentStats: unable to find data in memory cache]"
Oct 11 08:11:29 crc kubenswrapper[5055]: I1011 08:11:29.003591 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" path="/var/lib/kubelet/pods/ca09096e-8fb1-458b-bf23-86c9b8e30de6/volumes"
Oct 11 08:12:02 crc kubenswrapper[5055]: I1011 08:12:02.422702 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 08:12:02 crc kubenswrapper[5055]: I1011 08:12:02.423521 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 08:12:32 crc kubenswrapper[5055]: I1011 08:12:32.422111 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 08:12:32 crc kubenswrapper[5055]: I1011 08:12:32.422679 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.422863 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.423473 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.423549 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf"
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.424459 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.424554 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff" gracePeriod=600
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.993125 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff" exitCode=0
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.993464 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff"}
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.993498 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"}
Oct 11 08:13:02 crc kubenswrapper[5055]: I1011 08:13:02.993515 5055 scope.go:117] "RemoveContainer" containerID="18bc2dd241d5ef974b681082d1e83d259c5a16278ae2216263350448ddb11487"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.143567 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"]
Oct 11 08:15:00 crc kubenswrapper[5055]: E1011 08:15:00.144539 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="extract-content"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.144562 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="extract-content"
Oct 11 08:15:00 crc kubenswrapper[5055]: E1011 08:15:00.144595 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="registry-server"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.144603 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="registry-server"
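
[annotation] The machine-config-daemon restart above caps a run of liveness failures at 08:12:02, 08:12:32 and 08:13:02, one attempt roughly every 30 seconds, with the restart triggered on the third consecutive miss. That timing is consistent with periodSeconds=30 and failureThreshold=3, though the actual probe spec is not in this log. A sketch of the consecutive-failure gate, with those values assumed:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// prober counts consecutive liveness failures and requests a restart
// once failureThreshold is reached, resetting on any success. The
// threshold/period mirror what the log's timing suggests; the real
// numbers live in the pod spec, which this log does not show.
func prober(url string, period time.Duration, failureThreshold int, restart func()) {
	failures := 0
	for range time.Tick(period) {
		resp, err := http.Get(url)
		if err == nil && resp.StatusCode < 400 {
			resp.Body.Close()
			failures = 0
			continue
		}
		if err == nil {
			resp.Body.Close()
		}
		failures++
		fmt.Println("Probe failed probeType=Liveness consecutive =", failures)
		if failures >= failureThreshold {
			restart()
			failures = 0
		}
	}
}

func main() {
	prober("http://127.0.0.1:8798/health", 30*time.Second, 3, func() {
		fmt.Println("Container failed liveness probe, will be restarted")
	})
}
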
Oct 11 08:15:00 crc kubenswrapper[5055]: E1011 08:15:00.144618 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="extract-utilities"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.144627 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="extract-utilities"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.144852 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca09096e-8fb1-458b-bf23-86c9b8e30de6" containerName="registry-server"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.145353 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.152577 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.152577 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.153177 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"]
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.240334 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4crt6\" (UniqueName: \"kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.240399 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.240456 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.341268 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4crt6\" (UniqueName: \"kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.341326 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.341349 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.342204 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.348135 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.356745 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4crt6\" (UniqueName: \"kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6\") pod \"collect-profiles-29336175-lsn48\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.465242 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:00 crc kubenswrapper[5055]: I1011 08:15:00.869988 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"]
Oct 11 08:15:01 crc kubenswrapper[5055]: I1011 08:15:01.845145 5055 generic.go:334] "Generic (PLEG): container finished" podID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" containerID="c56f262735a094c24b121a1d0a2c3f842942c07b1918ad7593d4c38e8444e161" exitCode=0
Oct 11 08:15:01 crc kubenswrapper[5055]: I1011 08:15:01.845253 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48" event={"ID":"4a04b6dd-fd0e-43f6-a451-d017a7190daf","Type":"ContainerDied","Data":"c56f262735a094c24b121a1d0a2c3f842942c07b1918ad7593d4c38e8444e161"}
Oct 11 08:15:01 crc kubenswrapper[5055]: I1011 08:15:01.845731 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48" event={"ID":"4a04b6dd-fd0e-43f6-a451-d017a7190daf","Type":"ContainerStarted","Data":"9247a628c2dd5a3706fc8879155acccb48e9f2700ebcb88e74de8fde6097e8d5"}
Oct 11 08:15:02 crc kubenswrapper[5055]: I1011 08:15:02.422565 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 08:15:02 crc kubenswrapper[5055]: I1011 08:15:02.422879 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.099421 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.284589 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4crt6\" (UniqueName: \"kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6\") pod \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") "
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.284737 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume\") pod \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") "
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.284824 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume\") pod \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\" (UID: \"4a04b6dd-fd0e-43f6-a451-d017a7190daf\") "
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.285953 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume" (OuterVolumeSpecName: "config-volume") pod "4a04b6dd-fd0e-43f6-a451-d017a7190daf" (UID: "4a04b6dd-fd0e-43f6-a451-d017a7190daf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.290296 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6" (OuterVolumeSpecName: "kube-api-access-4crt6") pod "4a04b6dd-fd0e-43f6-a451-d017a7190daf" (UID: "4a04b6dd-fd0e-43f6-a451-d017a7190daf"). InnerVolumeSpecName "kube-api-access-4crt6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.291509 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4a04b6dd-fd0e-43f6-a451-d017a7190daf" (UID: "4a04b6dd-fd0e-43f6-a451-d017a7190daf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.386282 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4crt6\" (UniqueName: \"kubernetes.io/projected/4a04b6dd-fd0e-43f6-a451-d017a7190daf-kube-api-access-4crt6\") on node \"crc\" DevicePath \"\""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.386324 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a04b6dd-fd0e-43f6-a451-d017a7190daf-config-volume\") on node \"crc\" DevicePath \"\""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.386334 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a04b6dd-fd0e-43f6-a451-d017a7190daf-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.859185 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48" event={"ID":"4a04b6dd-fd0e-43f6-a451-d017a7190daf","Type":"ContainerDied","Data":"9247a628c2dd5a3706fc8879155acccb48e9f2700ebcb88e74de8fde6097e8d5"}
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.859230 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9247a628c2dd5a3706fc8879155acccb48e9f2700ebcb88e74de8fde6097e8d5"
Oct 11 08:15:03 crc kubenswrapper[5055]: I1011 08:15:03.859278 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"
Oct 11 08:15:04 crc kubenswrapper[5055]: I1011 08:15:04.159931 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc"]
Oct 11 08:15:04 crc kubenswrapper[5055]: I1011 08:15:04.164769 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336130-lljdc"]
Oct 11 08:15:05 crc kubenswrapper[5055]: I1011 08:15:05.002563 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1e30299-f1f2-44db-9782-40de14373f55" path="/var/lib/kubelet/pods/b1e30299-f1f2-44db-9782-40de14373f55/volumes"
Oct 11 08:15:32 crc kubenswrapper[5055]: I1011 08:15:32.422169 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 11 08:15:32 crc kubenswrapper[5055]: I1011 08:15:32.422739 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.098944 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"]
Oct 11 08:15:40 crc kubenswrapper[5055]: E1011 08:15:40.099981 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" containerName="collect-profiles"
Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.099997 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" containerName="collect-profiles"
podUID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" containerName="collect-profiles" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.100196 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" containerName="collect-profiles" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.101409 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.117296 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"] Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.296680 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.296741 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l729s\" (UniqueName: \"kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.296860 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.398276 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.398854 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.399037 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.399063 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l729s\" (UniqueName: \"kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.399548 5055 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.432745 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l729s\" (UniqueName: \"kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s\") pod \"certified-operators-6dqf4\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:40 crc kubenswrapper[5055]: I1011 08:15:40.724474 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:41 crc kubenswrapper[5055]: I1011 08:15:41.154115 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"] Oct 11 08:15:42 crc kubenswrapper[5055]: I1011 08:15:42.162637 5055 generic.go:334] "Generic (PLEG): container finished" podID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerID="958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38" exitCode=0 Oct 11 08:15:42 crc kubenswrapper[5055]: I1011 08:15:42.162786 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerDied","Data":"958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38"} Oct 11 08:15:42 crc kubenswrapper[5055]: I1011 08:15:42.164208 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerStarted","Data":"41a3142e446df5b8ff9db91e2f8684f9d6270592f5b135b87debb5ea1143005e"} Oct 11 08:15:42 crc kubenswrapper[5055]: I1011 08:15:42.165366 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:15:44 crc kubenswrapper[5055]: I1011 08:15:44.177052 5055 generic.go:334] "Generic (PLEG): container finished" podID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerID="7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997" exitCode=0 Oct 11 08:15:44 crc kubenswrapper[5055]: I1011 08:15:44.177103 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerDied","Data":"7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997"} Oct 11 08:15:45 crc kubenswrapper[5055]: I1011 08:15:45.185839 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerStarted","Data":"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f"} Oct 11 08:15:45 crc kubenswrapper[5055]: I1011 08:15:45.210699 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6dqf4" podStartSLOduration=2.4931896350000002 podStartE2EDuration="5.210681171s" podCreationTimestamp="2025-10-11 08:15:40 +0000 UTC" firstStartedPulling="2025-10-11 08:15:42.164930444 +0000 UTC m=+4925.939204281" lastFinishedPulling="2025-10-11 08:15:44.88242201 +0000 UTC m=+4928.656695817" 
observedRunningTime="2025-10-11 08:15:45.205077372 +0000 UTC m=+4928.979351199" watchObservedRunningTime="2025-10-11 08:15:45.210681171 +0000 UTC m=+4928.984954978" Oct 11 08:15:50 crc kubenswrapper[5055]: I1011 08:15:50.724820 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:50 crc kubenswrapper[5055]: I1011 08:15:50.725400 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:50 crc kubenswrapper[5055]: I1011 08:15:50.801021 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:51 crc kubenswrapper[5055]: I1011 08:15:51.265546 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:51 crc kubenswrapper[5055]: I1011 08:15:51.310005 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"] Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.243096 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6dqf4" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="registry-server" containerID="cri-o://a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f" gracePeriod=2 Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.675665 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.789684 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content\") pod \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.789732 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities\") pod \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.789820 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l729s\" (UniqueName: \"kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s\") pod \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\" (UID: \"27a60b93-3617-4ec5-91ab-2b9ac46696c7\") " Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.790727 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities" (OuterVolumeSpecName: "utilities") pod "27a60b93-3617-4ec5-91ab-2b9ac46696c7" (UID: "27a60b93-3617-4ec5-91ab-2b9ac46696c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.797036 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s" (OuterVolumeSpecName: "kube-api-access-l729s") pod "27a60b93-3617-4ec5-91ab-2b9ac46696c7" (UID: "27a60b93-3617-4ec5-91ab-2b9ac46696c7"). 
InnerVolumeSpecName "kube-api-access-l729s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.848262 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27a60b93-3617-4ec5-91ab-2b9ac46696c7" (UID: "27a60b93-3617-4ec5-91ab-2b9ac46696c7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.891600 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.891646 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27a60b93-3617-4ec5-91ab-2b9ac46696c7-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:15:53 crc kubenswrapper[5055]: I1011 08:15:53.891660 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l729s\" (UniqueName: \"kubernetes.io/projected/27a60b93-3617-4ec5-91ab-2b9ac46696c7-kube-api-access-l729s\") on node \"crc\" DevicePath \"\"" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.060179 5055 scope.go:117] "RemoveContainer" containerID="2ff16425d66b3317f75c747be7e31c9e91624ab6f9ed3f8c25560d428beecd12" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.253386 5055 generic.go:334] "Generic (PLEG): container finished" podID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerID="a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f" exitCode=0 Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.253432 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerDied","Data":"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f"} Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.253442 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6dqf4" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.253459 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6dqf4" event={"ID":"27a60b93-3617-4ec5-91ab-2b9ac46696c7","Type":"ContainerDied","Data":"41a3142e446df5b8ff9db91e2f8684f9d6270592f5b135b87debb5ea1143005e"} Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.253481 5055 scope.go:117] "RemoveContainer" containerID="a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.287745 5055 scope.go:117] "RemoveContainer" containerID="7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.291428 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"] Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.298617 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6dqf4"] Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.313091 5055 scope.go:117] "RemoveContainer" containerID="958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.341460 5055 scope.go:117] "RemoveContainer" containerID="a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f" Oct 11 08:15:54 crc kubenswrapper[5055]: E1011 08:15:54.341943 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f\": container with ID starting with a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f not found: ID does not exist" containerID="a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.342006 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f"} err="failed to get container status \"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f\": rpc error: code = NotFound desc = could not find container \"a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f\": container with ID starting with a96d303694227b0cc8c6056c477035b1fbfa7f08bf2aafd6f6cd321f30a10d3f not found: ID does not exist" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.342053 5055 scope.go:117] "RemoveContainer" containerID="7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997" Oct 11 08:15:54 crc kubenswrapper[5055]: E1011 08:15:54.342451 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997\": container with ID starting with 7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997 not found: ID does not exist" containerID="7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.342504 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997"} err="failed to get container status \"7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997\": rpc error: code = NotFound desc = could not find 
container \"7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997\": container with ID starting with 7e4a57f2691efab97690308832f9240cd277bc430db8b3000c589d95818d5997 not found: ID does not exist" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.342534 5055 scope.go:117] "RemoveContainer" containerID="958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38" Oct 11 08:15:54 crc kubenswrapper[5055]: E1011 08:15:54.342882 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38\": container with ID starting with 958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38 not found: ID does not exist" containerID="958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38" Oct 11 08:15:54 crc kubenswrapper[5055]: I1011 08:15:54.342925 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38"} err="failed to get container status \"958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38\": rpc error: code = NotFound desc = could not find container \"958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38\": container with ID starting with 958da18971f1ef665b9f3fd79f92ff2ff9072ab697513057009f253dc19b0a38 not found: ID does not exist" Oct 11 08:15:55 crc kubenswrapper[5055]: I1011 08:15:55.008961 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" path="/var/lib/kubelet/pods/27a60b93-3617-4ec5-91ab-2b9ac46696c7/volumes" Oct 11 08:16:02 crc kubenswrapper[5055]: I1011 08:16:02.422474 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:16:02 crc kubenswrapper[5055]: I1011 08:16:02.423119 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:16:02 crc kubenswrapper[5055]: I1011 08:16:02.423172 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:16:02 crc kubenswrapper[5055]: I1011 08:16:02.423844 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:16:02 crc kubenswrapper[5055]: I1011 08:16:02.423995 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" gracePeriod=600 Oct 11 08:16:02 crc kubenswrapper[5055]: E1011 08:16:02.553692 5055 
Oct 11 08:16:03 crc kubenswrapper[5055]: I1011 08:16:03.334825 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" exitCode=0
Oct 11 08:16:03 crc kubenswrapper[5055]: I1011 08:16:03.335182 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"}
Oct 11 08:16:03 crc kubenswrapper[5055]: I1011 08:16:03.335320 5055 scope.go:117] "RemoveContainer" containerID="fab87695f69f6d5fb61c1a9b4d1592ef4a3ce9074a78571caec9a3d47a6bf3ff"
Oct 11 08:16:03 crc kubenswrapper[5055]: I1011 08:16:03.335977 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:16:03 crc kubenswrapper[5055]: E1011 08:16:03.336360 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:16:13 crc kubenswrapper[5055]: I1011 08:16:13.993977 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:16:13 crc kubenswrapper[5055]: E1011 08:16:13.994826 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:16:28 crc kubenswrapper[5055]: I1011 08:16:28.993139 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:16:28 crc kubenswrapper[5055]: E1011 08:16:28.993935 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:16:39 crc kubenswrapper[5055]: I1011 08:16:39.993314 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:16:39 crc kubenswrapper[5055]: E1011 08:16:39.994069 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:16:52 crc kubenswrapper[5055]: I1011 08:16:52.994308 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:16:52 crc kubenswrapper[5055]: E1011 08:16:52.997199 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:17:05 crc kubenswrapper[5055]: I1011 08:17:05.993499 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:17:05 crc kubenswrapper[5055]: E1011 08:17:05.994548 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:17:17 crc kubenswrapper[5055]: I1011 08:17:17.002403 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:17:17 crc kubenswrapper[5055]: E1011 08:17:17.003088 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:17:29 crc kubenswrapper[5055]: I1011 08:17:29.993845 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:17:29 crc kubenswrapper[5055]: E1011 08:17:29.994531 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:17:44 crc kubenswrapper[5055]: I1011 08:17:44.995506 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:17:44 crc kubenswrapper[5055]: E1011 08:17:44.999640 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:17:57 crc kubenswrapper[5055]: I1011 08:17:57.002800 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:17:57 crc kubenswrapper[5055]: E1011 08:17:57.003887 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:18:07 crc kubenswrapper[5055]: I1011 08:18:07.994346 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:18:07 crc kubenswrapper[5055]: E1011 08:18:07.996194 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:18:21 crc kubenswrapper[5055]: I1011 08:18:21.993510 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:18:21 crc kubenswrapper[5055]: E1011 08:18:21.994470 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:18:36 crc kubenswrapper[5055]: I1011 08:18:36.997438 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:18:36 crc kubenswrapper[5055]: E1011 08:18:36.998060 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:18:48 crc kubenswrapper[5055]: I1011 08:18:48.993168 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:18:48 crc kubenswrapper[5055]: E1011 08:18:48.993933 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:18:59 crc kubenswrapper[5055]: I1011 08:18:59.994080 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:18:59 crc kubenswrapper[5055]: E1011 08:18:59.994749 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:19:13 crc kubenswrapper[5055]: I1011 08:19:13.993670 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:19:13 crc kubenswrapper[5055]: E1011 08:19:13.994515 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:19:27 crc kubenswrapper[5055]: I1011 08:19:27.993512 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:19:27 crc kubenswrapper[5055]: E1011 08:19:27.994294 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:19:38 crc kubenswrapper[5055]: I1011 08:19:38.993954 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:19:38 crc kubenswrapper[5055]: E1011 08:19:38.994892 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:19:52 crc kubenswrapper[5055]: I1011 08:19:52.994158 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:19:52 crc kubenswrapper[5055]: E1011 08:19:52.995309 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:20:05 crc kubenswrapper[5055]: I1011 08:20:05.993408 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:20:05 crc kubenswrapper[5055]: E1011 08:20:05.994280 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:20:18 crc kubenswrapper[5055]: I1011 08:20:18.993089 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:20:18 crc kubenswrapper[5055]: E1011 08:20:18.993865 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:20:33 crc kubenswrapper[5055]: I1011 08:20:33.994157 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:20:33 crc kubenswrapper[5055]: E1011 08:20:33.995454 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:20:44 crc kubenswrapper[5055]: I1011 08:20:44.993360 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:20:44 crc kubenswrapper[5055]: E1011 08:20:44.994394 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:20:05 crc kubenswrapper[5055]: E1011 08:20:05.994280 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:20:18 crc kubenswrapper[5055]: I1011 08:20:18.993089 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:20:18 crc kubenswrapper[5055]: E1011 08:20:18.993865 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:20:33 crc kubenswrapper[5055]: I1011 08:20:33.994157 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:20:33 crc kubenswrapper[5055]: E1011 08:20:33.995454 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:20:44 crc kubenswrapper[5055]: I1011 08:20:44.993360 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:20:44 crc kubenswrapper[5055]: E1011 08:20:44.994394 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.162232 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:20:55 crc kubenswrapper[5055]: E1011 08:20:55.163137 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="extract-utilities" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.163156 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="extract-utilities" Oct 11 08:20:55 crc kubenswrapper[5055]: E1011 08:20:55.163176 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="extract-content" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.163190 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="extract-content" 
Oct 11 08:20:55 crc kubenswrapper[5055]: E1011 08:20:55.163220 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="registry-server" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.163232 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="registry-server" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.163446 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="27a60b93-3617-4ec5-91ab-2b9ac46696c7" containerName="registry-server" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.164649 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.205923 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.267182 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.267316 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.267409 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpj72\" (UniqueName: \"kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.368711 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.368795 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.368844 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpj72\" (UniqueName: \"kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.369361 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
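As when certified-operators-6dqf4 was admitted at 08:15:40, admitting redhat-operators-nx7x2 first purges per-container CPU/memory accounting left behind by pods that no longer exist (here the three containers of pod 27a60b93-...). A toy sketch of that "remove stale state" sweep; the data shapes are illustrative, not the kubelet's actual types:

```go
// Toy "remove stale state" sweep: drop per-container resource assignments
// whose pod UID is no longer in the active set before admitting a new pod.
package main

import "fmt"

func removeStaleState(assignments map[string]map[string]string, active map[string]bool) {
	for podUID, containers := range assignments {
		if active[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
		}
		delete(assignments, podUID)
	}
}

func main() {
	assignments := map[string]map[string]string{
		"27a60b93-3617-4ec5-91ab-2b9ac46696c7": { // certified-operators pod, deleted above
			"extract-utilities": "cpuset 0-1",
			"extract-content":   "cpuset 0-1",
			"registry-server":   "cpuset 0-1",
		},
	}
	// Only the newly admitted pod is active, so the stale entries are swept.
	removeStaleState(assignments, map[string]bool{"f7725c85-88b1-47f5-afb7-8953cdea0f5f": true})
}
```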
\"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.369554 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.387518 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpj72\" (UniqueName: \"kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72\") pod \"redhat-operators-nx7x2\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.504085 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:20:55 crc kubenswrapper[5055]: I1011 08:20:55.927123 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:20:56 crc kubenswrapper[5055]: I1011 08:20:56.725199 5055 generic.go:334] "Generic (PLEG): container finished" podID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerID="8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d" exitCode=0 Oct 11 08:20:56 crc kubenswrapper[5055]: I1011 08:20:56.725263 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerDied","Data":"8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d"} Oct 11 08:20:56 crc kubenswrapper[5055]: I1011 08:20:56.725309 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerStarted","Data":"4443b1a651611a5fc02767008a7d858ab508143bea6afafadab97f46a693cbf2"} Oct 11 08:20:56 crc kubenswrapper[5055]: I1011 08:20:56.730192 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:20:57 crc kubenswrapper[5055]: I1011 08:20:57.001470 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:20:57 crc kubenswrapper[5055]: E1011 08:20:57.002262 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:20:57 crc kubenswrapper[5055]: I1011 08:20:57.735593 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerStarted","Data":"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f"} Oct 11 08:20:58 crc kubenswrapper[5055]: I1011 08:20:58.747834 5055 generic.go:334] "Generic (PLEG): container finished" 
podID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerID="471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f" exitCode=0 Oct 11 08:20:58 crc kubenswrapper[5055]: I1011 08:20:58.747912 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerDied","Data":"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f"} Oct 11 08:20:59 crc kubenswrapper[5055]: I1011 08:20:59.757471 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerStarted","Data":"ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e"} Oct 11 08:20:59 crc kubenswrapper[5055]: I1011 08:20:59.783028 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nx7x2" podStartSLOduration=2.242101118 podStartE2EDuration="4.78300726s" podCreationTimestamp="2025-10-11 08:20:55 +0000 UTC" firstStartedPulling="2025-10-11 08:20:56.729746909 +0000 UTC m=+5240.504020746" lastFinishedPulling="2025-10-11 08:20:59.270653091 +0000 UTC m=+5243.044926888" observedRunningTime="2025-10-11 08:20:59.7808998 +0000 UTC m=+5243.555173647" watchObservedRunningTime="2025-10-11 08:20:59.78300726 +0000 UTC m=+5243.557281077" Oct 11 08:21:05 crc kubenswrapper[5055]: I1011 08:21:05.505098 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:05 crc kubenswrapper[5055]: I1011 08:21:05.505723 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:05 crc kubenswrapper[5055]: I1011 08:21:05.566149 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:05 crc kubenswrapper[5055]: I1011 08:21:05.867779 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:05 crc kubenswrapper[5055]: I1011 08:21:05.904742 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:21:07 crc kubenswrapper[5055]: I1011 08:21:07.813408 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nx7x2" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="registry-server" containerID="cri-o://ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e" gracePeriod=2 Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.283377 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.471455 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content\") pod \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.471611 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpj72\" (UniqueName: \"kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72\") pod \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.471760 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities\") pod \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\" (UID: \"f7725c85-88b1-47f5-afb7-8953cdea0f5f\") " Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.474413 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities" (OuterVolumeSpecName: "utilities") pod "f7725c85-88b1-47f5-afb7-8953cdea0f5f" (UID: "f7725c85-88b1-47f5-afb7-8953cdea0f5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.494084 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72" (OuterVolumeSpecName: "kube-api-access-wpj72") pod "f7725c85-88b1-47f5-afb7-8953cdea0f5f" (UID: "f7725c85-88b1-47f5-afb7-8953cdea0f5f"). InnerVolumeSpecName "kube-api-access-wpj72". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.558973 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7725c85-88b1-47f5-afb7-8953cdea0f5f" (UID: "f7725c85-88b1-47f5-afb7-8953cdea0f5f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.574390 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.574431 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7725c85-88b1-47f5-afb7-8953cdea0f5f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.574446 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpj72\" (UniqueName: \"kubernetes.io/projected/f7725c85-88b1-47f5-afb7-8953cdea0f5f-kube-api-access-wpj72\") on node \"crc\" DevicePath \"\"" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.828546 5055 generic.go:334] "Generic (PLEG): container finished" podID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerID="ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e" exitCode=0 Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.828604 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerDied","Data":"ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e"} Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.828645 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nx7x2" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.828668 5055 scope.go:117] "RemoveContainer" containerID="ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.828656 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nx7x2" event={"ID":"f7725c85-88b1-47f5-afb7-8953cdea0f5f","Type":"ContainerDied","Data":"4443b1a651611a5fc02767008a7d858ab508143bea6afafadab97f46a693cbf2"} Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.853988 5055 scope.go:117] "RemoveContainer" containerID="471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.866403 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.871995 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nx7x2"] Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.892448 5055 scope.go:117] "RemoveContainer" containerID="8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.910844 5055 scope.go:117] "RemoveContainer" containerID="ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e" Oct 11 08:21:09 crc kubenswrapper[5055]: E1011 08:21:09.911385 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e\": container with ID starting with ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e not found: ID does not exist" containerID="ed568d6f42eeaf0efb75bd9975ed6e2fc59e57a8989070223edd9c4e999f492e" Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.911430 5055 
Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.911458 5055 scope.go:117] "RemoveContainer" containerID="471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f"
Oct 11 08:21:09 crc kubenswrapper[5055]: E1011 08:21:09.911732 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f\": container with ID starting with 471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f not found: ID does not exist" containerID="471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f"
Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.911819 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f"} err="failed to get container status \"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f\": rpc error: code = NotFound desc = could not find container \"471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f\": container with ID starting with 471e6e2eef427bd5a3c795d49f0ed8e0bdd22a50156565b9956c419bebe14d6f not found: ID does not exist"
Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.911857 5055 scope.go:117] "RemoveContainer" containerID="8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d"
Oct 11 08:21:09 crc kubenswrapper[5055]: E1011 08:21:09.912159 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d\": container with ID starting with 8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d not found: ID does not exist" containerID="8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d"
Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.912378 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d"} err="failed to get container status \"8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d\": rpc error: code = NotFound desc = could not find container \"8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d\": container with ID starting with 8aa66f89d1029a5b0dd4be84bd5f3a05ca2d2f840d368273656e96b61e98630d not found: ID does not exist"
Oct 11 08:21:09 crc kubenswrapper[5055]: I1011 08:21:09.993476 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f"
Oct 11 08:21:10 crc kubenswrapper[5055]: I1011 08:21:10.838864 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de"}
Oct 11 08:21:11 crc kubenswrapper[5055]: I1011 08:21:11.002166 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" path="/var/lib/kubelet/pods/f7725c85-88b1-47f5-afb7-8953cdea0f5f/volumes"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.586707 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"]
Oct 11 08:22:50 crc kubenswrapper[5055]: E1011 08:22:50.587794 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="extract-content"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.587814 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="extract-content"
Oct 11 08:22:50 crc kubenswrapper[5055]: E1011 08:22:50.587857 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="registry-server"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.587867 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="registry-server"
Oct 11 08:22:50 crc kubenswrapper[5055]: E1011 08:22:50.587891 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="extract-utilities"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.587902 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="extract-utilities"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.588130 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7725c85-88b1-47f5-afb7-8953cdea0f5f" containerName="registry-server"
Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.589801 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8pxr4"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.604602 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"] Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.652724 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.652883 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.652922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vt8j\" (UniqueName: \"kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.754457 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.754519 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vt8j\" (UniqueName: \"kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.754616 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.755196 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.755511 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.780113 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8vt8j\" (UniqueName: \"kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j\") pod \"redhat-marketplace-8pxr4\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:50 crc kubenswrapper[5055]: I1011 08:22:50.951676 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:22:51 crc kubenswrapper[5055]: I1011 08:22:51.368302 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"] Oct 11 08:22:51 crc kubenswrapper[5055]: I1011 08:22:51.660850 5055 generic.go:334] "Generic (PLEG): container finished" podID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerID="eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31" exitCode=0 Oct 11 08:22:51 crc kubenswrapper[5055]: I1011 08:22:51.660919 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerDied","Data":"eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31"} Oct 11 08:22:51 crc kubenswrapper[5055]: I1011 08:22:51.660985 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerStarted","Data":"674f6d5a4eea88539c490a789c3aae4b0fe9a41ad01f08fffbd3ddc83ff50826"} Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.981401 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.985110 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.991585 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.998033 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.998241 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcmpg\" (UniqueName: \"kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:52 crc kubenswrapper[5055]: I1011 08:22:52.998348 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.102094 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcmpg\" (UniqueName: \"kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.102195 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.102333 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.102884 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.103002 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.121858 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tcmpg\" (UniqueName: \"kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg\") pod \"community-operators-2d9gr\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.383317 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.677758 5055 generic.go:334] "Generic (PLEG): container finished" podID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerID="fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4" exitCode=0 Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.677819 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerDied","Data":"fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4"} Oct 11 08:22:53 crc kubenswrapper[5055]: I1011 08:22:53.827035 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:22:54 crc kubenswrapper[5055]: I1011 08:22:54.688300 5055 generic.go:334] "Generic (PLEG): container finished" podID="c1960305-9b38-47af-92ab-a225bef4819c" containerID="77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3" exitCode=0 Oct 11 08:22:54 crc kubenswrapper[5055]: I1011 08:22:54.688372 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerDied","Data":"77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3"} Oct 11 08:22:54 crc kubenswrapper[5055]: I1011 08:22:54.688624 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerStarted","Data":"30dfe0a6b2c80c8d4a2c9911868b5b806b0d1db9249a6a36ac8a284dbe6d5d49"} Oct 11 08:22:54 crc kubenswrapper[5055]: I1011 08:22:54.693506 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerStarted","Data":"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a"} Oct 11 08:22:54 crc kubenswrapper[5055]: I1011 08:22:54.729172 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8pxr4" podStartSLOduration=2.210606551 podStartE2EDuration="4.729154715s" podCreationTimestamp="2025-10-11 08:22:50 +0000 UTC" firstStartedPulling="2025-10-11 08:22:51.664570001 +0000 UTC m=+5355.438843798" lastFinishedPulling="2025-10-11 08:22:54.183118155 +0000 UTC m=+5357.957391962" observedRunningTime="2025-10-11 08:22:54.726464308 +0000 UTC m=+5358.500738135" watchObservedRunningTime="2025-10-11 08:22:54.729154715 +0000 UTC m=+5358.503428522" Oct 11 08:22:56 crc kubenswrapper[5055]: I1011 08:22:56.720472 5055 generic.go:334] "Generic (PLEG): container finished" podID="c1960305-9b38-47af-92ab-a225bef4819c" containerID="0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2" exitCode=0 Oct 11 08:22:56 crc kubenswrapper[5055]: I1011 08:22:56.720540 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" 
event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerDied","Data":"0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2"} Oct 11 08:22:58 crc kubenswrapper[5055]: I1011 08:22:58.742517 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerStarted","Data":"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b"} Oct 11 08:22:58 crc kubenswrapper[5055]: I1011 08:22:58.768562 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2d9gr" podStartSLOduration=3.795686038 podStartE2EDuration="6.768538878s" podCreationTimestamp="2025-10-11 08:22:52 +0000 UTC" firstStartedPulling="2025-10-11 08:22:54.690657621 +0000 UTC m=+5358.464931428" lastFinishedPulling="2025-10-11 08:22:57.663510441 +0000 UTC m=+5361.437784268" observedRunningTime="2025-10-11 08:22:58.767274602 +0000 UTC m=+5362.541548419" watchObservedRunningTime="2025-10-11 08:22:58.768538878 +0000 UTC m=+5362.542812695" Oct 11 08:23:00 crc kubenswrapper[5055]: I1011 08:23:00.952392 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:00 crc kubenswrapper[5055]: I1011 08:23:00.952460 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:01 crc kubenswrapper[5055]: I1011 08:23:01.005127 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:01 crc kubenswrapper[5055]: I1011 08:23:01.850566 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:01 crc kubenswrapper[5055]: I1011 08:23:01.915210 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"] Oct 11 08:23:03 crc kubenswrapper[5055]: I1011 08:23:03.384314 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:03 crc kubenswrapper[5055]: I1011 08:23:03.384372 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:03 crc kubenswrapper[5055]: I1011 08:23:03.429854 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:03 crc kubenswrapper[5055]: I1011 08:23:03.778014 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8pxr4" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="registry-server" containerID="cri-o://c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a" gracePeriod=2 Oct 11 08:23:03 crc kubenswrapper[5055]: I1011 08:23:03.824193 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.192266 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.375549 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities\") pod \"23a78636-1947-4ed4-80a6-829225dc5e3b\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.375664 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vt8j\" (UniqueName: \"kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j\") pod \"23a78636-1947-4ed4-80a6-829225dc5e3b\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.375734 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content\") pod \"23a78636-1947-4ed4-80a6-829225dc5e3b\" (UID: \"23a78636-1947-4ed4-80a6-829225dc5e3b\") " Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.376522 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities" (OuterVolumeSpecName: "utilities") pod "23a78636-1947-4ed4-80a6-829225dc5e3b" (UID: "23a78636-1947-4ed4-80a6-829225dc5e3b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.384289 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j" (OuterVolumeSpecName: "kube-api-access-8vt8j") pod "23a78636-1947-4ed4-80a6-829225dc5e3b" (UID: "23a78636-1947-4ed4-80a6-829225dc5e3b"). InnerVolumeSpecName "kube-api-access-8vt8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.390053 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23a78636-1947-4ed4-80a6-829225dc5e3b" (UID: "23a78636-1947-4ed4-80a6-829225dc5e3b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.477084 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vt8j\" (UniqueName: \"kubernetes.io/projected/23a78636-1947-4ed4-80a6-829225dc5e3b-kube-api-access-8vt8j\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.477122 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.477131 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23a78636-1947-4ed4-80a6-829225dc5e3b-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.787787 5055 generic.go:334] "Generic (PLEG): container finished" podID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerID="c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a" exitCode=0 Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.787874 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerDied","Data":"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a"} Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.787940 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8pxr4" event={"ID":"23a78636-1947-4ed4-80a6-829225dc5e3b","Type":"ContainerDied","Data":"674f6d5a4eea88539c490a789c3aae4b0fe9a41ad01f08fffbd3ddc83ff50826"} Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.787937 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8pxr4" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.787983 5055 scope.go:117] "RemoveContainer" containerID="c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.813985 5055 scope.go:117] "RemoveContainer" containerID="fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.828559 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"] Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.846445 5055 scope.go:117] "RemoveContainer" containerID="eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.851142 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8pxr4"] Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.872873 5055 scope.go:117] "RemoveContainer" containerID="c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a" Oct 11 08:23:04 crc kubenswrapper[5055]: E1011 08:23:04.874253 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a\": container with ID starting with c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a not found: ID does not exist" containerID="c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.874323 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a"} err="failed to get container status \"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a\": rpc error: code = NotFound desc = could not find container \"c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a\": container with ID starting with c3308b95dab53005c0bbae7c09ae1241264ee1d2d940b385f5b544b76062675a not found: ID does not exist" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.874362 5055 scope.go:117] "RemoveContainer" containerID="fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4" Oct 11 08:23:04 crc kubenswrapper[5055]: E1011 08:23:04.876485 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4\": container with ID starting with fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4 not found: ID does not exist" containerID="fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.876545 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4"} err="failed to get container status \"fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4\": rpc error: code = NotFound desc = could not find container \"fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4\": container with ID starting with fe51902e2f5cec220d4ca79a11f7343239b3c3e7835d3d58c2d0a9976606dbf4 not found: ID does not exist" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.876577 5055 scope.go:117] "RemoveContainer" 
containerID="eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31" Oct 11 08:23:04 crc kubenswrapper[5055]: E1011 08:23:04.876977 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31\": container with ID starting with eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31 not found: ID does not exist" containerID="eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31" Oct 11 08:23:04 crc kubenswrapper[5055]: I1011 08:23:04.877018 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31"} err="failed to get container status \"eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31\": rpc error: code = NotFound desc = could not find container \"eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31\": container with ID starting with eff9939d1cf1a7eb1b7f7329c524327389d37534355750f3ec224d808e47ee31 not found: ID does not exist" Oct 11 08:23:05 crc kubenswrapper[5055]: I1011 08:23:05.001537 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" path="/var/lib/kubelet/pods/23a78636-1947-4ed4-80a6-829225dc5e3b/volumes" Oct 11 08:23:05 crc kubenswrapper[5055]: I1011 08:23:05.564264 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:23:05 crc kubenswrapper[5055]: I1011 08:23:05.807999 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2d9gr" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="registry-server" containerID="cri-o://48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b" gracePeriod=2 Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.212112 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.401108 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcmpg\" (UniqueName: \"kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg\") pod \"c1960305-9b38-47af-92ab-a225bef4819c\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.401267 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities\") pod \"c1960305-9b38-47af-92ab-a225bef4819c\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.401352 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content\") pod \"c1960305-9b38-47af-92ab-a225bef4819c\" (UID: \"c1960305-9b38-47af-92ab-a225bef4819c\") " Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.402082 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities" (OuterVolumeSpecName: "utilities") pod "c1960305-9b38-47af-92ab-a225bef4819c" (UID: "c1960305-9b38-47af-92ab-a225bef4819c"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.416012 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg" (OuterVolumeSpecName: "kube-api-access-tcmpg") pod "c1960305-9b38-47af-92ab-a225bef4819c" (UID: "c1960305-9b38-47af-92ab-a225bef4819c"). InnerVolumeSpecName "kube-api-access-tcmpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.504030 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcmpg\" (UniqueName: \"kubernetes.io/projected/c1960305-9b38-47af-92ab-a225bef4819c-kube-api-access-tcmpg\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.504072 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.699283 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1960305-9b38-47af-92ab-a225bef4819c" (UID: "c1960305-9b38-47af-92ab-a225bef4819c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.708356 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1960305-9b38-47af-92ab-a225bef4819c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.816668 5055 generic.go:334] "Generic (PLEG): container finished" podID="c1960305-9b38-47af-92ab-a225bef4819c" containerID="48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b" exitCode=0 Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.816742 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerDied","Data":"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b"} Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.816772 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2d9gr" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.816788 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2d9gr" event={"ID":"c1960305-9b38-47af-92ab-a225bef4819c","Type":"ContainerDied","Data":"30dfe0a6b2c80c8d4a2c9911868b5b806b0d1db9249a6a36ac8a284dbe6d5d49"} Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.816811 5055 scope.go:117] "RemoveContainer" containerID="48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.842097 5055 scope.go:117] "RemoveContainer" containerID="0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.856908 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.865939 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2d9gr"] Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.875449 5055 scope.go:117] "RemoveContainer" containerID="77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.896919 5055 scope.go:117] "RemoveContainer" containerID="48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b" Oct 11 08:23:06 crc kubenswrapper[5055]: E1011 08:23:06.897315 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b\": container with ID starting with 48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b not found: ID does not exist" containerID="48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.897365 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b"} err="failed to get container status \"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b\": rpc error: code = NotFound desc = could not find container \"48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b\": container with ID starting with 48049a98c02ca49748f97d18d9fcfc4cf38a16aa2d982d2e2c4f5d9f76677a4b not found: ID does not exist" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.897408 5055 scope.go:117] "RemoveContainer" containerID="0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2" Oct 11 08:23:06 crc kubenswrapper[5055]: E1011 08:23:06.897692 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2\": container with ID starting with 0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2 not found: ID does not exist" containerID="0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.897719 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2"} err="failed to get container status \"0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2\": rpc error: code = NotFound desc = could not find 
container \"0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2\": container with ID starting with 0681ba4d1ef99f72a6c430048a49d31efabad9632ebb4903a9be29ae499aeeb2 not found: ID does not exist" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.897739 5055 scope.go:117] "RemoveContainer" containerID="77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3" Oct 11 08:23:06 crc kubenswrapper[5055]: E1011 08:23:06.897941 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3\": container with ID starting with 77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3 not found: ID does not exist" containerID="77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3" Oct 11 08:23:06 crc kubenswrapper[5055]: I1011 08:23:06.897974 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3"} err="failed to get container status \"77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3\": rpc error: code = NotFound desc = could not find container \"77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3\": container with ID starting with 77b0949f8f6f46f3352d8de33d983afa5311093a78f699994ebaef372a5152a3 not found: ID does not exist" Oct 11 08:23:07 crc kubenswrapper[5055]: I1011 08:23:07.002408 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1960305-9b38-47af-92ab-a225bef4819c" path="/var/lib/kubelet/pods/c1960305-9b38-47af-92ab-a225bef4819c/volumes" Oct 11 08:23:32 crc kubenswrapper[5055]: I1011 08:23:32.422446 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:23:32 crc kubenswrapper[5055]: I1011 08:23:32.423244 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:24:02 crc kubenswrapper[5055]: I1011 08:24:02.422367 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:24:02 crc kubenswrapper[5055]: I1011 08:24:02.422847 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:24:32 crc kubenswrapper[5055]: I1011 08:24:32.421870 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 
08:24:32 crc kubenswrapper[5055]: I1011 08:24:32.422524 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:24:32 crc kubenswrapper[5055]: I1011 08:24:32.422574 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:24:32 crc kubenswrapper[5055]: I1011 08:24:32.423114 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:24:32 crc kubenswrapper[5055]: I1011 08:24:32.423176 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de" gracePeriod=600 Oct 11 08:24:33 crc kubenswrapper[5055]: I1011 08:24:33.460510 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de" exitCode=0 Oct 11 08:24:33 crc kubenswrapper[5055]: I1011 08:24:33.460940 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de"} Oct 11 08:24:33 crc kubenswrapper[5055]: I1011 08:24:33.461096 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1"} Oct 11 08:24:33 crc kubenswrapper[5055]: I1011 08:24:33.461136 5055 scope.go:117] "RemoveContainer" containerID="4f8c883a456c7390553d1e1b93a09d17f8015ca113adff47cea4bb0b7163a92f" Oct 11 08:26:32 crc kubenswrapper[5055]: I1011 08:26:32.421855 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:26:32 crc kubenswrapper[5055]: I1011 08:26:32.424507 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.903781 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904688 5055 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="extract-utilities" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904706 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="extract-utilities" Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904722 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904729 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904750 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="extract-utilities" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904757 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="extract-utilities" Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904837 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904845 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904873 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="extract-content" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904881 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="extract-content" Oct 11 08:26:46 crc kubenswrapper[5055]: E1011 08:26:46.904895 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="extract-content" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.904903 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="extract-content" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.905090 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="23a78636-1947-4ed4-80a6-829225dc5e3b" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.905124 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1960305-9b38-47af-92ab-a225bef4819c" containerName="registry-server" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.906324 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:46 crc kubenswrapper[5055]: I1011 08:26:46.923012 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.094781 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.094859 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.094922 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq66n\" (UniqueName: \"kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.196448 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq66n\" (UniqueName: \"kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.196779 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.196919 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.197440 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.197608 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.233387 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kq66n\" (UniqueName: \"kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n\") pod \"certified-operators-r62mw\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.522452 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:47 crc kubenswrapper[5055]: I1011 08:26:47.976804 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:26:48 crc kubenswrapper[5055]: I1011 08:26:48.462271 5055 generic.go:334] "Generic (PLEG): container finished" podID="4bba6391-f20d-46b5-a441-125a38045ec7" containerID="1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c" exitCode=0 Oct 11 08:26:48 crc kubenswrapper[5055]: I1011 08:26:48.462377 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerDied","Data":"1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c"} Oct 11 08:26:48 crc kubenswrapper[5055]: I1011 08:26:48.462603 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerStarted","Data":"4a2c79c90df8e623262aee55338c7eef10fe4e4fa0687ce454dd746a097b93b1"} Oct 11 08:26:48 crc kubenswrapper[5055]: I1011 08:26:48.464090 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:26:49 crc kubenswrapper[5055]: I1011 08:26:49.487417 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerStarted","Data":"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f"} Oct 11 08:26:50 crc kubenswrapper[5055]: I1011 08:26:50.495718 5055 generic.go:334] "Generic (PLEG): container finished" podID="4bba6391-f20d-46b5-a441-125a38045ec7" containerID="26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f" exitCode=0 Oct 11 08:26:50 crc kubenswrapper[5055]: I1011 08:26:50.495779 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerDied","Data":"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f"} Oct 11 08:26:51 crc kubenswrapper[5055]: I1011 08:26:51.503665 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerStarted","Data":"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e"} Oct 11 08:26:51 crc kubenswrapper[5055]: I1011 08:26:51.522807 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r62mw" podStartSLOduration=3.063136131 podStartE2EDuration="5.522757972s" podCreationTimestamp="2025-10-11 08:26:46 +0000 UTC" firstStartedPulling="2025-10-11 08:26:48.463873559 +0000 UTC m=+5592.238147366" lastFinishedPulling="2025-10-11 08:26:50.9234954 +0000 UTC m=+5594.697769207" observedRunningTime="2025-10-11 08:26:51.520666212 +0000 UTC m=+5595.294940029" watchObservedRunningTime="2025-10-11 
08:26:51.522757972 +0000 UTC m=+5595.297031789" Oct 11 08:26:57 crc kubenswrapper[5055]: I1011 08:26:57.522655 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:57 crc kubenswrapper[5055]: I1011 08:26:57.523077 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:57 crc kubenswrapper[5055]: I1011 08:26:57.582364 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:57 crc kubenswrapper[5055]: I1011 08:26:57.635055 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:26:57 crc kubenswrapper[5055]: I1011 08:26:57.831917 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:26:59 crc kubenswrapper[5055]: I1011 08:26:59.567243 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r62mw" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="registry-server" containerID="cri-o://3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e" gracePeriod=2 Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.449573 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.496938 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities\") pod \"4bba6391-f20d-46b5-a441-125a38045ec7\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.497024 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq66n\" (UniqueName: \"kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n\") pod \"4bba6391-f20d-46b5-a441-125a38045ec7\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.497154 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content\") pod \"4bba6391-f20d-46b5-a441-125a38045ec7\" (UID: \"4bba6391-f20d-46b5-a441-125a38045ec7\") " Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.497977 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities" (OuterVolumeSpecName: "utilities") pod "4bba6391-f20d-46b5-a441-125a38045ec7" (UID: "4bba6391-f20d-46b5-a441-125a38045ec7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.502644 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n" (OuterVolumeSpecName: "kube-api-access-kq66n") pod "4bba6391-f20d-46b5-a441-125a38045ec7" (UID: "4bba6391-f20d-46b5-a441-125a38045ec7"). InnerVolumeSpecName "kube-api-access-kq66n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.549714 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4bba6391-f20d-46b5-a441-125a38045ec7" (UID: "4bba6391-f20d-46b5-a441-125a38045ec7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.576055 5055 generic.go:334] "Generic (PLEG): container finished" podID="4bba6391-f20d-46b5-a441-125a38045ec7" containerID="3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e" exitCode=0 Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.576095 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerDied","Data":"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e"} Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.576123 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r62mw" event={"ID":"4bba6391-f20d-46b5-a441-125a38045ec7","Type":"ContainerDied","Data":"4a2c79c90df8e623262aee55338c7eef10fe4e4fa0687ce454dd746a097b93b1"} Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.576139 5055 scope.go:117] "RemoveContainer" containerID="3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.576142 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r62mw" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.596796 5055 scope.go:117] "RemoveContainer" containerID="26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.598232 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq66n\" (UniqueName: \"kubernetes.io/projected/4bba6391-f20d-46b5-a441-125a38045ec7-kube-api-access-kq66n\") on node \"crc\" DevicePath \"\"" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.598261 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.598272 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bba6391-f20d-46b5-a441-125a38045ec7-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.610317 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.614835 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r62mw"] Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.629557 5055 scope.go:117] "RemoveContainer" containerID="1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.646338 5055 scope.go:117] "RemoveContainer" containerID="3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e" Oct 11 08:27:00 crc kubenswrapper[5055]: E1011 08:27:00.649139 5055 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e\": container with ID starting with 3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e not found: ID does not exist" containerID="3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.649211 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e"} err="failed to get container status \"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e\": rpc error: code = NotFound desc = could not find container \"3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e\": container with ID starting with 3434f8faa0ff00c99f37741ac60ba19930a2ed86aed85c3fab380ab7948d402e not found: ID does not exist" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.649243 5055 scope.go:117] "RemoveContainer" containerID="26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f" Oct 11 08:27:00 crc kubenswrapper[5055]: E1011 08:27:00.649563 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f\": container with ID starting with 26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f not found: ID does not exist" containerID="26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.649591 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f"} err="failed to get container status \"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f\": rpc error: code = NotFound desc = could not find container \"26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f\": container with ID starting with 26fe8a54207ef1cc021927525e0461e65a2fdbf8f9e9c28f397ec028021ebf3f not found: ID does not exist" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.649620 5055 scope.go:117] "RemoveContainer" containerID="1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c" Oct 11 08:27:00 crc kubenswrapper[5055]: E1011 08:27:00.649885 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c\": container with ID starting with 1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c not found: ID does not exist" containerID="1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c" Oct 11 08:27:00 crc kubenswrapper[5055]: I1011 08:27:00.649941 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c"} err="failed to get container status \"1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c\": rpc error: code = NotFound desc = could not find container \"1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c\": container with ID starting with 1a61119b88497b779bef6793a2de30f6df4511085525ea8d64a1b5b74f8a741c not found: ID does not exist" Oct 11 08:27:01 crc kubenswrapper[5055]: I1011 08:27:01.007364 5055 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" path="/var/lib/kubelet/pods/4bba6391-f20d-46b5-a441-125a38045ec7/volumes" Oct 11 08:27:02 crc kubenswrapper[5055]: I1011 08:27:02.422510 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:27:02 crc kubenswrapper[5055]: I1011 08:27:02.422605 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.422255 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.423279 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.423366 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.424385 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.424479 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" gracePeriod=600 Oct 11 08:27:32 crc kubenswrapper[5055]: E1011 08:27:32.543452 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.828973 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" exitCode=0 Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.829052 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1"} Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.829107 5055 scope.go:117] "RemoveContainer" containerID="a3401eca5335f09c8656b73a49289e48163ae3cf23e62a6059ceeca3ca3db7de" Oct 11 08:27:32 crc kubenswrapper[5055]: I1011 08:27:32.829932 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:27:32 crc kubenswrapper[5055]: E1011 08:27:32.830361 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:27:46 crc kubenswrapper[5055]: I1011 08:27:46.997046 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:27:46 crc kubenswrapper[5055]: E1011 08:27:46.997876 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:27:59 crc kubenswrapper[5055]: I1011 08:27:59.993435 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:27:59 crc kubenswrapper[5055]: E1011 08:27:59.994114 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:28:14 crc kubenswrapper[5055]: I1011 08:28:14.994074 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:28:14 crc kubenswrapper[5055]: E1011 08:28:14.995160 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:28:29 crc kubenswrapper[5055]: I1011 08:28:29.994106 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:28:29 crc kubenswrapper[5055]: E1011 08:28:29.994859 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:28:40 crc kubenswrapper[5055]: I1011 08:28:40.994254 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:28:40 crc kubenswrapper[5055]: E1011 08:28:40.994857 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:28:55 crc kubenswrapper[5055]: I1011 08:28:55.992928 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:28:55 crc kubenswrapper[5055]: E1011 08:28:55.993699 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:29:08 crc kubenswrapper[5055]: I1011 08:29:08.994183 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:29:08 crc kubenswrapper[5055]: E1011 08:29:08.995410 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:29:23 crc kubenswrapper[5055]: I1011 08:29:23.993191 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:29:23 crc kubenswrapper[5055]: E1011 08:29:23.994060 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:29:37 crc kubenswrapper[5055]: I1011 08:29:37.995226 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:29:37 crc kubenswrapper[5055]: E1011 08:29:37.996134 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:29:51 crc kubenswrapper[5055]: I1011 08:29:51.993268 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:29:51 crc kubenswrapper[5055]: E1011 08:29:51.994020 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.140383 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln"] Oct 11 08:30:00 crc kubenswrapper[5055]: E1011 08:30:00.142347 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="extract-content" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.142462 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="extract-content" Oct 11 08:30:00 crc kubenswrapper[5055]: E1011 08:30:00.142565 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="extract-utilities" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.142643 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="extract-utilities" Oct 11 08:30:00 crc kubenswrapper[5055]: E1011 08:30:00.142717 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="registry-server" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.142791 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="registry-server" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.143086 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bba6391-f20d-46b5-a441-125a38045ec7" containerName="registry-server" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.143657 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.146876 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln"] Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.147043 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.147416 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.264301 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.264356 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.264398 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlrv2\" (UniqueName: \"kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.365952 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.366118 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.366236 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlrv2\" (UniqueName: \"kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.367015 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume\") pod 
\"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.371556 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.384236 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlrv2\" (UniqueName: \"kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2\") pod \"collect-profiles-29336190-b8xln\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.467266 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:00 crc kubenswrapper[5055]: I1011 08:30:00.847792 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln"] Oct 11 08:30:01 crc kubenswrapper[5055]: I1011 08:30:01.147795 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" event={"ID":"295550b8-f3fb-4687-a147-c3ee81bb20a3","Type":"ContainerStarted","Data":"eb76c84a0983f26da99f364e5206741b7b61a65d6e7430bdbec22440b45d8565"} Oct 11 08:30:01 crc kubenswrapper[5055]: I1011 08:30:01.148076 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" event={"ID":"295550b8-f3fb-4687-a147-c3ee81bb20a3","Type":"ContainerStarted","Data":"a009d6bc8047cf4b5beb7efcf69b81a6468a4c3d17070a66b9262ed960850482"} Oct 11 08:30:01 crc kubenswrapper[5055]: I1011 08:30:01.164298 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" podStartSLOduration=1.164244837 podStartE2EDuration="1.164244837s" podCreationTimestamp="2025-10-11 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-11 08:30:01.163047903 +0000 UTC m=+5784.937321740" watchObservedRunningTime="2025-10-11 08:30:01.164244837 +0000 UTC m=+5784.938518644" Oct 11 08:30:02 crc kubenswrapper[5055]: I1011 08:30:02.157654 5055 generic.go:334] "Generic (PLEG): container finished" podID="295550b8-f3fb-4687-a147-c3ee81bb20a3" containerID="eb76c84a0983f26da99f364e5206741b7b61a65d6e7430bdbec22440b45d8565" exitCode=0 Oct 11 08:30:02 crc kubenswrapper[5055]: I1011 08:30:02.157694 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" event={"ID":"295550b8-f3fb-4687-a147-c3ee81bb20a3","Type":"ContainerDied","Data":"eb76c84a0983f26da99f364e5206741b7b61a65d6e7430bdbec22440b45d8565"} Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.431159 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.555298 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlrv2\" (UniqueName: \"kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2\") pod \"295550b8-f3fb-4687-a147-c3ee81bb20a3\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.555387 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume\") pod \"295550b8-f3fb-4687-a147-c3ee81bb20a3\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.555457 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume\") pod \"295550b8-f3fb-4687-a147-c3ee81bb20a3\" (UID: \"295550b8-f3fb-4687-a147-c3ee81bb20a3\") " Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.556629 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume" (OuterVolumeSpecName: "config-volume") pod "295550b8-f3fb-4687-a147-c3ee81bb20a3" (UID: "295550b8-f3fb-4687-a147-c3ee81bb20a3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.561678 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2" (OuterVolumeSpecName: "kube-api-access-wlrv2") pod "295550b8-f3fb-4687-a147-c3ee81bb20a3" (UID: "295550b8-f3fb-4687-a147-c3ee81bb20a3"). InnerVolumeSpecName "kube-api-access-wlrv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.563840 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "295550b8-f3fb-4687-a147-c3ee81bb20a3" (UID: "295550b8-f3fb-4687-a147-c3ee81bb20a3"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.657422 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/295550b8-f3fb-4687-a147-c3ee81bb20a3-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.657495 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/295550b8-f3fb-4687-a147-c3ee81bb20a3-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:30:03 crc kubenswrapper[5055]: I1011 08:30:03.657514 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlrv2\" (UniqueName: \"kubernetes.io/projected/295550b8-f3fb-4687-a147-c3ee81bb20a3-kube-api-access-wlrv2\") on node \"crc\" DevicePath \"\"" Oct 11 08:30:04 crc kubenswrapper[5055]: I1011 08:30:04.172887 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" event={"ID":"295550b8-f3fb-4687-a147-c3ee81bb20a3","Type":"ContainerDied","Data":"a009d6bc8047cf4b5beb7efcf69b81a6468a4c3d17070a66b9262ed960850482"} Oct 11 08:30:04 crc kubenswrapper[5055]: I1011 08:30:04.172923 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a009d6bc8047cf4b5beb7efcf69b81a6468a4c3d17070a66b9262ed960850482" Oct 11 08:30:04 crc kubenswrapper[5055]: I1011 08:30:04.172936 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln" Oct 11 08:30:04 crc kubenswrapper[5055]: I1011 08:30:04.238933 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc"] Oct 11 08:30:04 crc kubenswrapper[5055]: I1011 08:30:04.243572 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336145-26zpc"] Oct 11 08:30:05 crc kubenswrapper[5055]: I1011 08:30:05.002121 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc52ecca-cec2-4a1f-a930-42c62690889d" path="/var/lib/kubelet/pods/bc52ecca-cec2-4a1f-a930-42c62690889d/volumes" Oct 11 08:30:06 crc kubenswrapper[5055]: I1011 08:30:06.997294 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:30:06 crc kubenswrapper[5055]: E1011 08:30:06.997507 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:30:20 crc kubenswrapper[5055]: I1011 08:30:20.994513 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:30:20 crc kubenswrapper[5055]: E1011 08:30:20.995539 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:30:33 crc kubenswrapper[5055]: I1011 08:30:33.994567 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:30:33 crc kubenswrapper[5055]: E1011 08:30:33.996253 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:30:44 crc kubenswrapper[5055]: I1011 08:30:44.993337 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:30:44 crc kubenswrapper[5055]: E1011 08:30:44.995289 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:30:54 crc kubenswrapper[5055]: I1011 08:30:54.443815 5055 scope.go:117] "RemoveContainer" containerID="2779cafc80e61a7ee9e5509c3da9e854b8a61bda938540417e72e449fe88c9df" Oct 11 08:30:55 crc kubenswrapper[5055]: I1011 08:30:55.993395 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:30:55 crc kubenswrapper[5055]: E1011 08:30:55.993687 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.342047 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:06 crc kubenswrapper[5055]: E1011 08:31:06.344314 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="295550b8-f3fb-4687-a147-c3ee81bb20a3" containerName="collect-profiles" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.344421 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="295550b8-f3fb-4687-a147-c3ee81bb20a3" containerName="collect-profiles" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.344699 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="295550b8-f3fb-4687-a147-c3ee81bb20a3" containerName="collect-profiles" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.346613 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.351451 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.489975 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.490295 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vw4h8\" (UniqueName: \"kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.490348 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.591823 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vw4h8\" (UniqueName: \"kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.591878 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.591931 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.592362 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.592459 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.610532 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vw4h8\" (UniqueName: \"kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8\") pod \"redhat-operators-lgd2j\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.663877 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:06 crc kubenswrapper[5055]: I1011 08:31:06.998521 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:31:06 crc kubenswrapper[5055]: E1011 08:31:06.998921 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:31:07 crc kubenswrapper[5055]: I1011 08:31:07.097468 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:07 crc kubenswrapper[5055]: I1011 08:31:07.669544 5055 generic.go:334] "Generic (PLEG): container finished" podID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerID="34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef" exitCode=0 Oct 11 08:31:07 crc kubenswrapper[5055]: I1011 08:31:07.669592 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerDied","Data":"34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef"} Oct 11 08:31:07 crc kubenswrapper[5055]: I1011 08:31:07.669617 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerStarted","Data":"426a5bae293e9631c569bcfb4b6cd306fdd915837eb8137b79a20e30c82fbbf0"} Oct 11 08:31:08 crc kubenswrapper[5055]: I1011 08:31:08.677052 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerStarted","Data":"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b"} Oct 11 08:31:09 crc kubenswrapper[5055]: I1011 08:31:09.686827 5055 generic.go:334] "Generic (PLEG): container finished" podID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerID="e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b" exitCode=0 Oct 11 08:31:09 crc kubenswrapper[5055]: I1011 08:31:09.686924 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerDied","Data":"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b"} Oct 11 08:31:10 crc kubenswrapper[5055]: I1011 08:31:10.698999 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerStarted","Data":"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6"} Oct 11 08:31:10 crc kubenswrapper[5055]: I1011 08:31:10.728306 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-lgd2j" podStartSLOduration=2.240362439 podStartE2EDuration="4.728283005s" podCreationTimestamp="2025-10-11 08:31:06 +0000 UTC" firstStartedPulling="2025-10-11 08:31:07.671078969 +0000 UTC m=+5851.445352776" lastFinishedPulling="2025-10-11 08:31:10.158999535 +0000 UTC m=+5853.933273342" observedRunningTime="2025-10-11 08:31:10.725067883 +0000 UTC m=+5854.499341700" watchObservedRunningTime="2025-10-11 08:31:10.728283005 +0000 UTC m=+5854.502556832" Oct 11 08:31:16 crc kubenswrapper[5055]: I1011 08:31:16.664639 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:16 crc kubenswrapper[5055]: I1011 08:31:16.665074 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:16 crc kubenswrapper[5055]: I1011 08:31:16.721122 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:16 crc kubenswrapper[5055]: I1011 08:31:16.792858 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:16 crc kubenswrapper[5055]: I1011 08:31:16.950628 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:18 crc kubenswrapper[5055]: I1011 08:31:18.760904 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lgd2j" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="registry-server" containerID="cri-o://5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6" gracePeriod=2 Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.718901 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.769324 5055 generic.go:334] "Generic (PLEG): container finished" podID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerID="5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6" exitCode=0 Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.769393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerDied","Data":"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6"} Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.769440 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lgd2j" event={"ID":"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1","Type":"ContainerDied","Data":"426a5bae293e9631c569bcfb4b6cd306fdd915837eb8137b79a20e30c82fbbf0"} Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.769464 5055 scope.go:117] "RemoveContainer" containerID="5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.769526 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lgd2j" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.788609 5055 scope.go:117] "RemoveContainer" containerID="e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.804490 5055 scope.go:117] "RemoveContainer" containerID="34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.827249 5055 scope.go:117] "RemoveContainer" containerID="5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6" Oct 11 08:31:19 crc kubenswrapper[5055]: E1011 08:31:19.827667 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6\": container with ID starting with 5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6 not found: ID does not exist" containerID="5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.827698 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6"} err="failed to get container status \"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6\": rpc error: code = NotFound desc = could not find container \"5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6\": container with ID starting with 5c0d1aad6635a7b6d1e79a1dcce4c8b62bd0702e0d3b925e2e49ee833d8de0f6 not found: ID does not exist" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.827719 5055 scope.go:117] "RemoveContainer" containerID="e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b" Oct 11 08:31:19 crc kubenswrapper[5055]: E1011 08:31:19.828096 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b\": container with ID starting with e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b not found: ID does not exist" containerID="e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.828146 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b"} err="failed to get container status \"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b\": rpc error: code = NotFound desc = could not find container \"e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b\": container with ID starting with e06d186c41826a0e8a7ab0c7df3a8020188d93633ceec4d88efad88684acd15b not found: ID does not exist" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.828176 5055 scope.go:117] "RemoveContainer" containerID="34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef" Oct 11 08:31:19 crc kubenswrapper[5055]: E1011 08:31:19.828489 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef\": container with ID starting with 34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef not found: ID does not exist" containerID="34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef" 
Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.828536 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef"} err="failed to get container status \"34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef\": rpc error: code = NotFound desc = could not find container \"34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef\": container with ID starting with 34d815d7f51842a67f0dfec6bd022b666effaae5623254b4c47121eee22507ef not found: ID does not exist" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.879671 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities\") pod \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.879759 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content\") pod \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.879840 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vw4h8\" (UniqueName: \"kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8\") pod \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\" (UID: \"1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1\") " Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.880691 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities" (OuterVolumeSpecName: "utilities") pod "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" (UID: "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.885027 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8" (OuterVolumeSpecName: "kube-api-access-vw4h8") pod "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" (UID: "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1"). InnerVolumeSpecName "kube-api-access-vw4h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.972434 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" (UID: "1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.981841 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.981873 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:31:19 crc kubenswrapper[5055]: I1011 08:31:19.981920 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vw4h8\" (UniqueName: \"kubernetes.io/projected/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1-kube-api-access-vw4h8\") on node \"crc\" DevicePath \"\"" Oct 11 08:31:20 crc kubenswrapper[5055]: I1011 08:31:20.107915 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:20 crc kubenswrapper[5055]: I1011 08:31:20.114206 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lgd2j"] Oct 11 08:31:21 crc kubenswrapper[5055]: I1011 08:31:21.008508 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" path="/var/lib/kubelet/pods/1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1/volumes" Oct 11 08:31:21 crc kubenswrapper[5055]: I1011 08:31:21.993944 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:31:21 crc kubenswrapper[5055]: E1011 08:31:21.994335 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:31:34 crc kubenswrapper[5055]: I1011 08:31:34.993995 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:31:34 crc kubenswrapper[5055]: E1011 08:31:34.995086 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:31:48 crc kubenswrapper[5055]: I1011 08:31:48.994425 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:31:48 crc kubenswrapper[5055]: E1011 08:31:48.995298 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:32:03 crc kubenswrapper[5055]: I1011 08:32:03.993312 5055 scope.go:117] 
"RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:32:03 crc kubenswrapper[5055]: E1011 08:32:03.994080 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:32:17 crc kubenswrapper[5055]: I1011 08:32:17.002095 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:32:17 crc kubenswrapper[5055]: E1011 08:32:17.004465 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:32:30 crc kubenswrapper[5055]: I1011 08:32:30.993197 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:32:30 crc kubenswrapper[5055]: E1011 08:32:30.993973 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:32:41 crc kubenswrapper[5055]: I1011 08:32:41.994728 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:32:42 crc kubenswrapper[5055]: I1011 08:32:42.461755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05"} Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.763585 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:02 crc kubenswrapper[5055]: E1011 08:33:02.764512 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="registry-server" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.764527 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="registry-server" Oct 11 08:33:02 crc kubenswrapper[5055]: E1011 08:33:02.764548 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="extract-content" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.764553 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="extract-content" Oct 11 08:33:02 crc kubenswrapper[5055]: E1011 08:33:02.764584 5055 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="extract-utilities" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.764591 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="extract-utilities" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.764747 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="1306fa5b-4da9-4ed5-9af8-2a85c9d81bf1" containerName="registry-server" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.766057 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.775900 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.782722 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.783002 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lztp4\" (UniqueName: \"kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.783127 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.883807 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lztp4\" (UniqueName: \"kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.883870 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.883935 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.884342 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities\") pod \"community-operators-np99m\" 
(UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.886907 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:02 crc kubenswrapper[5055]: I1011 08:33:02.917116 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lztp4\" (UniqueName: \"kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4\") pod \"community-operators-np99m\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:03 crc kubenswrapper[5055]: I1011 08:33:03.091321 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:03 crc kubenswrapper[5055]: I1011 08:33:03.586638 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:03 crc kubenswrapper[5055]: I1011 08:33:03.667086 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerStarted","Data":"fd20a7d15039552a7610f968175d6d2d0f6694c1527b3d216bade27339e4efdb"} Oct 11 08:33:04 crc kubenswrapper[5055]: I1011 08:33:04.677530 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerID="f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9" exitCode=0 Oct 11 08:33:04 crc kubenswrapper[5055]: I1011 08:33:04.677628 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerDied","Data":"f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9"} Oct 11 08:33:04 crc kubenswrapper[5055]: I1011 08:33:04.682559 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:33:06 crc kubenswrapper[5055]: I1011 08:33:06.698606 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerID="3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476" exitCode=0 Oct 11 08:33:06 crc kubenswrapper[5055]: I1011 08:33:06.698642 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerDied","Data":"3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476"} Oct 11 08:33:07 crc kubenswrapper[5055]: I1011 08:33:07.708569 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerStarted","Data":"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11"} Oct 11 08:33:07 crc kubenswrapper[5055]: I1011 08:33:07.728194 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-np99m" podStartSLOduration=3.227963853 podStartE2EDuration="5.728174268s" podCreationTimestamp="2025-10-11 08:33:02 +0000 UTC" 
firstStartedPulling="2025-10-11 08:33:04.682064748 +0000 UTC m=+5968.456338585" lastFinishedPulling="2025-10-11 08:33:07.182275173 +0000 UTC m=+5970.956549000" observedRunningTime="2025-10-11 08:33:07.727140479 +0000 UTC m=+5971.501414286" watchObservedRunningTime="2025-10-11 08:33:07.728174268 +0000 UTC m=+5971.502448085" Oct 11 08:33:13 crc kubenswrapper[5055]: I1011 08:33:13.092609 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:13 crc kubenswrapper[5055]: I1011 08:33:13.092955 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:13 crc kubenswrapper[5055]: I1011 08:33:13.155156 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:13 crc kubenswrapper[5055]: I1011 08:33:13.839565 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:13 crc kubenswrapper[5055]: I1011 08:33:13.901034 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.774142 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-np99m" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="registry-server" containerID="cri-o://916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11" gracePeriod=2 Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.801203 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.804852 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.812102 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.973131 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6d8w\" (UniqueName: \"kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.973203 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:15 crc kubenswrapper[5055]: I1011 08:33:15.973234 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.074341 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.074486 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6d8w\" (UniqueName: \"kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.074526 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.078093 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.082692 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.096101 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q6d8w\" (UniqueName: \"kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w\") pod \"redhat-marketplace-fwt2v\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.214720 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.277469 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.381170 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lztp4\" (UniqueName: \"kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4\") pod \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.381316 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities\") pod \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.381358 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content\") pod \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\" (UID: \"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c\") " Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.382408 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities" (OuterVolumeSpecName: "utilities") pod "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" (UID: "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.386138 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4" (OuterVolumeSpecName: "kube-api-access-lztp4") pod "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" (UID: "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c"). InnerVolumeSpecName "kube-api-access-lztp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.448169 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" (UID: "c4a9ebd5-b51c-4923-b3fb-ee6bec53992c"). InnerVolumeSpecName "catalog-content". 
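[annotation] The volumes being mounted for redhat-marketplace-fwt2v, and torn down for community-operators-np99m just below, are the standard catalog-pod trio: two emptyDir scratch areas plus a projected service-account token. Spelled out with the k8s.io/api/core/v1 types (a sketch for illustration; the kube-api-access-* volume is injected by the API server rather than authored by hand, and the 3607s expiry is the usual admission default, not a value read from this log):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// catalogVolumes mirrors the volume set in the reconciler_common lines
// above: two emptyDirs plus a projected service-account token.
func catalogVolumes() []corev1.Volume {
	expiry := int64(3607) // admission-controller default, assumed
	return []corev1.Volume{
		{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "kube-api-access-lztp4", VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					},
				}},
			},
		}},
	}
}

func main() {
	for _, v := range catalogVolumes() {
		fmt.Println(v.Name)
	}
}
```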
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.484146 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lztp4\" (UniqueName: \"kubernetes.io/projected/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-kube-api-access-lztp4\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.484425 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.484487 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.632393 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.782964 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerStarted","Data":"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334"} Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.783307 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerStarted","Data":"95bb3c6b0a665bbb2eb8362783885148a02ff11a88bc0735900d0db75f2fc0bf"} Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.786751 5055 generic.go:334] "Generic (PLEG): container finished" podID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerID="916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11" exitCode=0 Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.786887 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerDied","Data":"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11"} Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.786997 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-np99m" event={"ID":"c4a9ebd5-b51c-4923-b3fb-ee6bec53992c","Type":"ContainerDied","Data":"fd20a7d15039552a7610f968175d6d2d0f6694c1527b3d216bade27339e4efdb"} Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.787052 5055 scope.go:117] "RemoveContainer" containerID="916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.787307 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-np99m" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.856005 5055 scope.go:117] "RemoveContainer" containerID="3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.865422 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.870903 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-np99m"] Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.886314 5055 scope.go:117] "RemoveContainer" containerID="f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.936915 5055 scope.go:117] "RemoveContainer" containerID="916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11" Oct 11 08:33:16 crc kubenswrapper[5055]: E1011 08:33:16.937235 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11\": container with ID starting with 916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11 not found: ID does not exist" containerID="916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.937275 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11"} err="failed to get container status \"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11\": rpc error: code = NotFound desc = could not find container \"916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11\": container with ID starting with 916820b1d2a9ac97ffadb6f092274da625af0fde82e628a5e4f73d66c0a55a11 not found: ID does not exist" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.937301 5055 scope.go:117] "RemoveContainer" containerID="3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476" Oct 11 08:33:16 crc kubenswrapper[5055]: E1011 08:33:16.937536 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476\": container with ID starting with 3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476 not found: ID does not exist" containerID="3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.937561 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476"} err="failed to get container status \"3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476\": rpc error: code = NotFound desc = could not find container \"3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476\": container with ID starting with 3311f8c19079d267da7a6a942e500aee55c92a88ad7eeeeb7d5d0a2f8b020476 not found: ID does not exist" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.937581 5055 scope.go:117] "RemoveContainer" containerID="f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9" Oct 11 08:33:16 crc kubenswrapper[5055]: E1011 08:33:16.937981 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9\": container with ID starting with f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9 not found: ID does not exist" containerID="f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9" Oct 11 08:33:16 crc kubenswrapper[5055]: I1011 08:33:16.938004 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9"} err="failed to get container status \"f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9\": rpc error: code = NotFound desc = could not find container \"f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9\": container with ID starting with f0e7924ac3f697d68b9d973fbcc16e37ab6552c87740a8837ca6865bc543cbe9 not found: ID does not exist" Oct 11 08:33:17 crc kubenswrapper[5055]: I1011 08:33:17.004785 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" path="/var/lib/kubelet/pods/c4a9ebd5-b51c-4923-b3fb-ee6bec53992c/volumes" Oct 11 08:33:17 crc kubenswrapper[5055]: I1011 08:33:17.800548 5055 generic.go:334] "Generic (PLEG): container finished" podID="29eba458-0f5c-4366-8db8-08971a4af00b" containerID="d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334" exitCode=0 Oct 11 08:33:17 crc kubenswrapper[5055]: I1011 08:33:17.800946 5055 generic.go:334] "Generic (PLEG): container finished" podID="29eba458-0f5c-4366-8db8-08971a4af00b" containerID="6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc" exitCode=0 Oct 11 08:33:17 crc kubenswrapper[5055]: I1011 08:33:17.800597 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerDied","Data":"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334"} Oct 11 08:33:17 crc kubenswrapper[5055]: I1011 08:33:17.801029 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerDied","Data":"6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc"} Oct 11 08:33:18 crc kubenswrapper[5055]: I1011 08:33:18.818904 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerStarted","Data":"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02"} Oct 11 08:33:18 crc kubenswrapper[5055]: I1011 08:33:18.847513 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fwt2v" podStartSLOduration=2.397183073 podStartE2EDuration="3.847486147s" podCreationTimestamp="2025-10-11 08:33:15 +0000 UTC" firstStartedPulling="2025-10-11 08:33:16.784709696 +0000 UTC m=+5980.558983503" lastFinishedPulling="2025-10-11 08:33:18.23501276 +0000 UTC m=+5982.009286577" observedRunningTime="2025-10-11 08:33:18.839087588 +0000 UTC m=+5982.613361415" watchObservedRunningTime="2025-10-11 08:33:18.847486147 +0000 UTC m=+5982.621759964" Oct 11 08:33:26 crc kubenswrapper[5055]: I1011 08:33:26.215910 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:26 crc kubenswrapper[5055]: I1011 08:33:26.216324 
5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:26 crc kubenswrapper[5055]: I1011 08:33:26.294746 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:26 crc kubenswrapper[5055]: I1011 08:33:26.935191 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:26 crc kubenswrapper[5055]: I1011 08:33:26.982232 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:28 crc kubenswrapper[5055]: I1011 08:33:28.899403 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fwt2v" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="registry-server" containerID="cri-o://8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02" gracePeriod=2 Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.302468 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.471290 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6d8w\" (UniqueName: \"kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w\") pod \"29eba458-0f5c-4366-8db8-08971a4af00b\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.471354 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content\") pod \"29eba458-0f5c-4366-8db8-08971a4af00b\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.471376 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities\") pod \"29eba458-0f5c-4366-8db8-08971a4af00b\" (UID: \"29eba458-0f5c-4366-8db8-08971a4af00b\") " Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.472481 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities" (OuterVolumeSpecName: "utilities") pod "29eba458-0f5c-4366-8db8-08971a4af00b" (UID: "29eba458-0f5c-4366-8db8-08971a4af00b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.476571 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w" (OuterVolumeSpecName: "kube-api-access-q6d8w") pod "29eba458-0f5c-4366-8db8-08971a4af00b" (UID: "29eba458-0f5c-4366-8db8-08971a4af00b"). InnerVolumeSpecName "kube-api-access-q6d8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.488154 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29eba458-0f5c-4366-8db8-08971a4af00b" (UID: "29eba458-0f5c-4366-8db8-08971a4af00b"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.573318 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6d8w\" (UniqueName: \"kubernetes.io/projected/29eba458-0f5c-4366-8db8-08971a4af00b-kube-api-access-q6d8w\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.573348 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.573357 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29eba458-0f5c-4366-8db8-08971a4af00b-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.908885 5055 generic.go:334] "Generic (PLEG): container finished" podID="29eba458-0f5c-4366-8db8-08971a4af00b" containerID="8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02" exitCode=0 Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.908942 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerDied","Data":"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02"} Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.909017 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fwt2v" event={"ID":"29eba458-0f5c-4366-8db8-08971a4af00b","Type":"ContainerDied","Data":"95bb3c6b0a665bbb2eb8362783885148a02ff11a88bc0735900d0db75f2fc0bf"} Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.909019 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fwt2v" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.909049 5055 scope.go:117] "RemoveContainer" containerID="8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.954921 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.955759 5055 scope.go:117] "RemoveContainer" containerID="6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc" Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.959008 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fwt2v"] Oct 11 08:33:29 crc kubenswrapper[5055]: I1011 08:33:29.972623 5055 scope.go:117] "RemoveContainer" containerID="d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.002309 5055 scope.go:117] "RemoveContainer" containerID="8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02" Oct 11 08:33:30 crc kubenswrapper[5055]: E1011 08:33:30.002759 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02\": container with ID starting with 8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02 not found: ID does not exist" containerID="8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.002818 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02"} err="failed to get container status \"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02\": rpc error: code = NotFound desc = could not find container \"8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02\": container with ID starting with 8469b5c3ecebc471e6145d015c61da4eec421cfd0588077db2e392b07bceed02 not found: ID does not exist" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.002840 5055 scope.go:117] "RemoveContainer" containerID="6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc" Oct 11 08:33:30 crc kubenswrapper[5055]: E1011 08:33:30.003847 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc\": container with ID starting with 6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc not found: ID does not exist" containerID="6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.004579 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc"} err="failed to get container status \"6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc\": rpc error: code = NotFound desc = could not find container \"6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc\": container with ID starting with 6db16728c969a057bec174c70013f1d8be204e3a504b0998e580c0f1ef4a9ecc not found: ID does not exist" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.004659 5055 scope.go:117] "RemoveContainer" 
containerID="d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334" Oct 11 08:33:30 crc kubenswrapper[5055]: E1011 08:33:30.005059 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334\": container with ID starting with d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334 not found: ID does not exist" containerID="d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334" Oct 11 08:33:30 crc kubenswrapper[5055]: I1011 08:33:30.005086 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334"} err="failed to get container status \"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334\": rpc error: code = NotFound desc = could not find container \"d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334\": container with ID starting with d303945feb21b7a9d583104cee891027e3dbd102eae05dd3e3699a23b516e334 not found: ID does not exist" Oct 11 08:33:31 crc kubenswrapper[5055]: I1011 08:33:31.006791 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" path="/var/lib/kubelet/pods/29eba458-0f5c-4366-8db8-08971a4af00b/volumes" Oct 11 08:35:02 crc kubenswrapper[5055]: I1011 08:35:02.422638 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:35:02 crc kubenswrapper[5055]: I1011 08:35:02.423280 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:35:32 crc kubenswrapper[5055]: I1011 08:35:32.422727 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:35:32 crc kubenswrapper[5055]: I1011 08:35:32.423526 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:36:02 crc kubenswrapper[5055]: I1011 08:36:02.422302 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:36:02 crc kubenswrapper[5055]: I1011 08:36:02.422838 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:36:02 crc kubenswrapper[5055]: I1011 08:36:02.422888 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:36:02 crc kubenswrapper[5055]: I1011 08:36:02.423446 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:36:02 crc kubenswrapper[5055]: I1011 08:36:02.423500 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05" gracePeriod=600 Oct 11 08:36:03 crc kubenswrapper[5055]: I1011 08:36:03.094579 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05" exitCode=0 Oct 11 08:36:03 crc kubenswrapper[5055]: I1011 08:36:03.094642 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05"} Oct 11 08:36:03 crc kubenswrapper[5055]: I1011 08:36:03.095369 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3"} Oct 11 08:36:03 crc kubenswrapper[5055]: I1011 08:36:03.095409 5055 scope.go:117] "RemoveContainer" containerID="e8de5f3f02d54ec0425476ba9f69292571212d7df9d6cce12f928cdca5699af1" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.786052 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787418 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="extract-content" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787440 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="extract-content" Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787465 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787474 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787492 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787502 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787518 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="extract-content" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787526 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="extract-content" Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787546 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="extract-utilities" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787555 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="extract-utilities" Oct 11 08:37:06 crc kubenswrapper[5055]: E1011 08:37:06.787572 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="extract-utilities" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787581 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="extract-utilities" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787824 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4a9ebd5-b51c-4923-b3fb-ee6bec53992c" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.787845 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="29eba458-0f5c-4366-8db8-08971a4af00b" containerName="registry-server" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.789319 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.819163 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.862121 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2c5g\" (UniqueName: \"kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.862227 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.862283 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.964079 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.964147 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.964185 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2c5g\" (UniqueName: \"kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.964729 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.964769 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:06 crc kubenswrapper[5055]: I1011 08:37:06.990825 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-l2c5g\" (UniqueName: \"kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g\") pod \"certified-operators-8pmzd\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:07 crc kubenswrapper[5055]: I1011 08:37:07.121700 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:07 crc kubenswrapper[5055]: I1011 08:37:07.604313 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:07 crc kubenswrapper[5055]: I1011 08:37:07.635640 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerStarted","Data":"fb05ed30cc49f3d6e990bb75f2e921dba6068da55e49ea52e5afe01163d2e0e3"} Oct 11 08:37:08 crc kubenswrapper[5055]: I1011 08:37:08.645587 5055 generic.go:334] "Generic (PLEG): container finished" podID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerID="dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e" exitCode=0 Oct 11 08:37:08 crc kubenswrapper[5055]: I1011 08:37:08.645645 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerDied","Data":"dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e"} Oct 11 08:37:09 crc kubenswrapper[5055]: I1011 08:37:09.655951 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerStarted","Data":"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f"} Oct 11 08:37:10 crc kubenswrapper[5055]: I1011 08:37:10.664594 5055 generic.go:334] "Generic (PLEG): container finished" podID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerID="7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f" exitCode=0 Oct 11 08:37:10 crc kubenswrapper[5055]: I1011 08:37:10.664674 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerDied","Data":"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f"} Oct 11 08:37:11 crc kubenswrapper[5055]: I1011 08:37:11.673707 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerStarted","Data":"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a"} Oct 11 08:37:11 crc kubenswrapper[5055]: I1011 08:37:11.698979 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8pmzd" podStartSLOduration=3.134792635 podStartE2EDuration="5.698952217s" podCreationTimestamp="2025-10-11 08:37:06 +0000 UTC" firstStartedPulling="2025-10-11 08:37:08.64805864 +0000 UTC m=+6212.422332457" lastFinishedPulling="2025-10-11 08:37:11.212218202 +0000 UTC m=+6214.986492039" observedRunningTime="2025-10-11 08:37:11.695410196 +0000 UTC m=+6215.469684023" watchObservedRunningTime="2025-10-11 08:37:11.698952217 +0000 UTC m=+6215.473226044" Oct 11 08:37:17 crc kubenswrapper[5055]: I1011 08:37:17.123185 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:17 crc kubenswrapper[5055]: I1011 08:37:17.123613 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:17 crc kubenswrapper[5055]: I1011 08:37:17.196427 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:17 crc kubenswrapper[5055]: I1011 08:37:17.773691 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:17 crc kubenswrapper[5055]: I1011 08:37:17.846009 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:19 crc kubenswrapper[5055]: I1011 08:37:19.735948 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8pmzd" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="registry-server" containerID="cri-o://803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a" gracePeriod=2 Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.640970 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.745126 5055 generic.go:334] "Generic (PLEG): container finished" podID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerID="803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a" exitCode=0 Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.745165 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerDied","Data":"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a"} Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.745191 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8pmzd" event={"ID":"80eaccbb-e960-4758-9e7f-1a30714a456a","Type":"ContainerDied","Data":"fb05ed30cc49f3d6e990bb75f2e921dba6068da55e49ea52e5afe01163d2e0e3"} Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.745205 5055 scope.go:117] "RemoveContainer" containerID="803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.745307 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8pmzd" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.765788 5055 scope.go:117] "RemoveContainer" containerID="7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.766216 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2c5g\" (UniqueName: \"kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g\") pod \"80eaccbb-e960-4758-9e7f-1a30714a456a\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.766290 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities\") pod \"80eaccbb-e960-4758-9e7f-1a30714a456a\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.766378 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content\") pod \"80eaccbb-e960-4758-9e7f-1a30714a456a\" (UID: \"80eaccbb-e960-4758-9e7f-1a30714a456a\") " Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.767321 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities" (OuterVolumeSpecName: "utilities") pod "80eaccbb-e960-4758-9e7f-1a30714a456a" (UID: "80eaccbb-e960-4758-9e7f-1a30714a456a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.771371 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g" (OuterVolumeSpecName: "kube-api-access-l2c5g") pod "80eaccbb-e960-4758-9e7f-1a30714a456a" (UID: "80eaccbb-e960-4758-9e7f-1a30714a456a"). InnerVolumeSpecName "kube-api-access-l2c5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.813086 5055 scope.go:117] "RemoveContainer" containerID="dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.817163 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80eaccbb-e960-4758-9e7f-1a30714a456a" (UID: "80eaccbb-e960-4758-9e7f-1a30714a456a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.836000 5055 scope.go:117] "RemoveContainer" containerID="803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a" Oct 11 08:37:20 crc kubenswrapper[5055]: E1011 08:37:20.836384 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a\": container with ID starting with 803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a not found: ID does not exist" containerID="803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.836411 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a"} err="failed to get container status \"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a\": rpc error: code = NotFound desc = could not find container \"803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a\": container with ID starting with 803010a8a536af72dcd295eccdf2ee4a6deddfec4b78452d860b2fe23b67146a not found: ID does not exist" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.836432 5055 scope.go:117] "RemoveContainer" containerID="7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f" Oct 11 08:37:20 crc kubenswrapper[5055]: E1011 08:37:20.836703 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f\": container with ID starting with 7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f not found: ID does not exist" containerID="7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.836717 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f"} err="failed to get container status \"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f\": rpc error: code = NotFound desc = could not find container \"7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f\": container with ID starting with 7eaa3cddaea23b6cc2d67838764d907723509c71021746fdc9b2e3672afa8f8f not found: ID does not exist" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.836728 5055 scope.go:117] "RemoveContainer" containerID="dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e" Oct 11 08:37:20 crc kubenswrapper[5055]: E1011 08:37:20.837579 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e\": container with ID starting with dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e not found: ID does not exist" containerID="dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.837990 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e"} err="failed to get container status \"dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e\": rpc error: code = NotFound desc = could not 
find container \"dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e\": container with ID starting with dc9bbcfbef0d5e23524c3ec81df02b554afa469b62382662924b7fce1e42606e not found: ID does not exist" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.867503 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2c5g\" (UniqueName: \"kubernetes.io/projected/80eaccbb-e960-4758-9e7f-1a30714a456a-kube-api-access-l2c5g\") on node \"crc\" DevicePath \"\"" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.867538 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:37:20 crc kubenswrapper[5055]: I1011 08:37:20.867546 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80eaccbb-e960-4758-9e7f-1a30714a456a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:37:21 crc kubenswrapper[5055]: I1011 08:37:21.068254 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:21 crc kubenswrapper[5055]: I1011 08:37:21.073942 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8pmzd"] Oct 11 08:37:23 crc kubenswrapper[5055]: I1011 08:37:23.009135 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" path="/var/lib/kubelet/pods/80eaccbb-e960-4758-9e7f-1a30714a456a/volumes" Oct 11 08:38:02 crc kubenswrapper[5055]: I1011 08:38:02.422701 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:38:02 crc kubenswrapper[5055]: I1011 08:38:02.423538 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:38:32 crc kubenswrapper[5055]: I1011 08:38:32.422346 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:38:32 crc kubenswrapper[5055]: I1011 08:38:32.423224 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.422600 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.423312 5055 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.423390 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.424435 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.424560 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" gracePeriod=600 Oct 11 08:39:02 crc kubenswrapper[5055]: E1011 08:39:02.554529 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.657716 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" exitCode=0 Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.657839 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3"} Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.657930 5055 scope.go:117] "RemoveContainer" containerID="c72299c0c74cfbec5bb6fd5c87f45403bf5c8e138ed8a6c9e35334abe9c15b05" Oct 11 08:39:02 crc kubenswrapper[5055]: I1011 08:39:02.658580 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:39:02 crc kubenswrapper[5055]: E1011 08:39:02.659116 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:39:13 crc kubenswrapper[5055]: I1011 08:39:13.993642 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:39:13 crc kubenswrapper[5055]: E1011 08:39:13.994153 5055 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
[the same "RemoveContainer"/"Error syncing pod, skipping" pair repeats verbatim at 08:39:27, 08:39:39, 08:39:54, 08:40:08, 08:40:19, 08:40:30, 08:40:44, 08:40:58 and 08:41:13 while the 5m0s back-off window runs]
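The probe failure that opens this stretch is a plain HTTP GET against http://127.0.0.1:8798/health; "connection refused" means nothing was listening, so the kubelet marks the container unhealthy, kills it with the pod's termination grace period (gracePeriod=600 above), and the subsequent restarts trip the CrashLoopBackOff seen throughout. A minimal standalone sketch of such an HTTP health check (a hypothetical illustration in Go, not kubelet's actual prober; the 3-failure threshold mirrors the default failureThreshold):

```go
// Minimal sketch of an HTTP liveness-style check like the one failing above
// ("Get http://127.0.0.1:8798/health: connect: connection refused").
// Hypothetical standalone illustration, not kubelet's prober code.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce succeeds on any 2xx/3xx response, the usual HTTP-probe rule.
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused" when nothing listens
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("unhealthy status %d", resp.StatusCode)
}

func main() {
	failures := 0
	for i := 0; i < 3; i++ { // default failureThreshold is 3 consecutive misses
		if err := probeOnce("http://127.0.0.1:8798/health", time.Second); err != nil {
			failures++
			fmt.Println("probe failed:", err)
		} else {
			failures = 0
		}
		time.Sleep(time.Second)
	}
	if failures >= 3 {
		fmt.Println("liveness failed: container would be killed and restarted")
	}
}
```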
Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.552986 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"]
Oct 11 08:41:24 crc kubenswrapper[5055]: E1011 08:41:24.554079 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="registry-server"
Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.554140 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="registry-server"
Oct 11 08:41:24 crc kubenswrapper[5055]: E1011 08:41:24.554179 5055 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="extract-utilities" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.554192 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="extract-utilities" Oct 11 08:41:24 crc kubenswrapper[5055]: E1011 08:41:24.554242 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="extract-content" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.554255 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="extract-content" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.554556 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="80eaccbb-e960-4758-9e7f-1a30714a456a" containerName="registry-server" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.556636 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.607950 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"] Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.668400 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.668511 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hhbt\" (UniqueName: \"kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.668615 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.769707 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.769801 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.769861 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hhbt\" (UniqueName: 
\"kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.770609 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.770729 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.797838 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hhbt\" (UniqueName: \"kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt\") pod \"redhat-operators-s2bp7\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:24 crc kubenswrapper[5055]: I1011 08:41:24.920907 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:25 crc kubenswrapper[5055]: I1011 08:41:25.132641 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"] Oct 11 08:41:25 crc kubenswrapper[5055]: I1011 08:41:25.903326 5055 generic.go:334] "Generic (PLEG): container finished" podID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerID="c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8" exitCode=0 Oct 11 08:41:25 crc kubenswrapper[5055]: I1011 08:41:25.903441 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerDied","Data":"c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8"} Oct 11 08:41:25 crc kubenswrapper[5055]: I1011 08:41:25.903650 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerStarted","Data":"1b49fa596c900eed8ec3d4c48001ab1b0664161a3dbca45b280a3fb8339ddced"} Oct 11 08:41:25 crc kubenswrapper[5055]: I1011 08:41:25.905433 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:41:26 crc kubenswrapper[5055]: I1011 08:41:26.912797 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerStarted","Data":"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427"} Oct 11 08:41:27 crc kubenswrapper[5055]: I1011 08:41:27.931088 5055 generic.go:334] "Generic (PLEG): container finished" podID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerID="436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427" exitCode=0 Oct 11 08:41:27 crc kubenswrapper[5055]: I1011 08:41:27.931170 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" 
event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerDied","Data":"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427"} Oct 11 08:41:28 crc kubenswrapper[5055]: I1011 08:41:28.938596 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerStarted","Data":"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f"} Oct 11 08:41:28 crc kubenswrapper[5055]: I1011 08:41:28.964543 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s2bp7" podStartSLOduration=2.5266296329999998 podStartE2EDuration="4.964524573s" podCreationTimestamp="2025-10-11 08:41:24 +0000 UTC" firstStartedPulling="2025-10-11 08:41:25.905165977 +0000 UTC m=+6469.679439784" lastFinishedPulling="2025-10-11 08:41:28.343060917 +0000 UTC m=+6472.117334724" observedRunningTime="2025-10-11 08:41:28.962187177 +0000 UTC m=+6472.736461034" watchObservedRunningTime="2025-10-11 08:41:28.964524573 +0000 UTC m=+6472.738798390" Oct 11 08:41:28 crc kubenswrapper[5055]: I1011 08:41:28.993835 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:41:28 crc kubenswrapper[5055]: E1011 08:41:28.994226 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:41:34 crc kubenswrapper[5055]: I1011 08:41:34.921289 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:34 crc kubenswrapper[5055]: I1011 08:41:34.922082 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:34 crc kubenswrapper[5055]: I1011 08:41:34.986967 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:35 crc kubenswrapper[5055]: I1011 08:41:35.068050 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:35 crc kubenswrapper[5055]: I1011 08:41:35.225154 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"] Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.003973 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s2bp7" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="registry-server" containerID="cri-o://7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f" gracePeriod=2 Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.481056 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.648920 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content\") pod \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.649044 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hhbt\" (UniqueName: \"kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt\") pod \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.649109 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities\") pod \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\" (UID: \"7490ab1c-cc9a-4d43-b2ad-f9a27f876865\") " Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.650117 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities" (OuterVolumeSpecName: "utilities") pod "7490ab1c-cc9a-4d43-b2ad-f9a27f876865" (UID: "7490ab1c-cc9a-4d43-b2ad-f9a27f876865"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.654906 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt" (OuterVolumeSpecName: "kube-api-access-5hhbt") pod "7490ab1c-cc9a-4d43-b2ad-f9a27f876865" (UID: "7490ab1c-cc9a-4d43-b2ad-f9a27f876865"). InnerVolumeSpecName "kube-api-access-5hhbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.751067 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hhbt\" (UniqueName: \"kubernetes.io/projected/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-kube-api-access-5hhbt\") on node \"crc\" DevicePath \"\"" Oct 11 08:41:37 crc kubenswrapper[5055]: I1011 08:41:37.751099 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.013429 5055 generic.go:334] "Generic (PLEG): container finished" podID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerID="7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f" exitCode=0 Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.013492 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s2bp7" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.013518 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerDied","Data":"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f"} Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.013905 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2bp7" event={"ID":"7490ab1c-cc9a-4d43-b2ad-f9a27f876865","Type":"ContainerDied","Data":"1b49fa596c900eed8ec3d4c48001ab1b0664161a3dbca45b280a3fb8339ddced"} Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.013931 5055 scope.go:117] "RemoveContainer" containerID="7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.036361 5055 scope.go:117] "RemoveContainer" containerID="436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.065984 5055 scope.go:117] "RemoveContainer" containerID="c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.097087 5055 scope.go:117] "RemoveContainer" containerID="7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f" Oct 11 08:41:38 crc kubenswrapper[5055]: E1011 08:41:38.097597 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f\": container with ID starting with 7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f not found: ID does not exist" containerID="7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.097670 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f"} err="failed to get container status \"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f\": rpc error: code = NotFound desc = could not find container \"7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f\": container with ID starting with 7e8c15124ac562bb579ea7bc537edc35e62a9d43bb3bb2f0b8e9c3953a426e5f not found: ID does not exist" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.098304 5055 scope.go:117] "RemoveContainer" containerID="436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427" Oct 11 08:41:38 crc kubenswrapper[5055]: E1011 08:41:38.098833 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427\": container with ID starting with 436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427 not found: ID does not exist" containerID="436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.098878 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427"} err="failed to get container status \"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427\": rpc error: code = NotFound desc = could not find container 
\"436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427\": container with ID starting with 436b8fc05b765b7ef64e9c16ae29120f1b81a37790177474b06c11173ad1a427 not found: ID does not exist" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.098907 5055 scope.go:117] "RemoveContainer" containerID="c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8" Oct 11 08:41:38 crc kubenswrapper[5055]: E1011 08:41:38.099240 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8\": container with ID starting with c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8 not found: ID does not exist" containerID="c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.099285 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8"} err="failed to get container status \"c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8\": rpc error: code = NotFound desc = could not find container \"c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8\": container with ID starting with c8ce7abb3d38d7a305da5fc142fda82a6443c925034ab3e080f9c4885c3757d8 not found: ID does not exist" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.115104 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7490ab1c-cc9a-4d43-b2ad-f9a27f876865" (UID: "7490ab1c-cc9a-4d43-b2ad-f9a27f876865"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.157201 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7490ab1c-cc9a-4d43-b2ad-f9a27f876865-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.344175 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"] Oct 11 08:41:38 crc kubenswrapper[5055]: I1011 08:41:38.350467 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s2bp7"] Oct 11 08:41:39 crc kubenswrapper[5055]: I1011 08:41:39.006194 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" path="/var/lib/kubelet/pods/7490ab1c-cc9a-4d43-b2ad-f9a27f876865/volumes" Oct 11 08:41:41 crc kubenswrapper[5055]: I1011 08:41:41.994275 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:41:41 crc kubenswrapper[5055]: E1011 08:41:41.994854 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:41:53 crc kubenswrapper[5055]: I1011 08:41:53.994022 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:41:53 crc kubenswrapper[5055]: E1011 08:41:53.995297 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:42:08 crc kubenswrapper[5055]: I1011 08:42:08.993529 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:42:08 crc kubenswrapper[5055]: E1011 08:42:08.994971 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:42:22 crc kubenswrapper[5055]: I1011 08:42:22.993694 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:42:22 crc kubenswrapper[5055]: E1011 08:42:22.994570 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:42:33 crc kubenswrapper[5055]: I1011 08:42:33.994401 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:42:33 crc kubenswrapper[5055]: E1011 08:42:33.995551 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:42:44 crc kubenswrapper[5055]: I1011 08:42:44.993470 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:42:44 crc kubenswrapper[5055]: E1011 08:42:44.994339 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:42:57 crc kubenswrapper[5055]: I1011 08:42:57.019287 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:42:57 crc kubenswrapper[5055]: E1011 08:42:57.021563 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:43:11 crc kubenswrapper[5055]: I1011 08:43:11.994892 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:43:11 crc kubenswrapper[5055]: E1011 08:43:11.996168 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:43:24 crc kubenswrapper[5055]: I1011 08:43:24.021682 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:43:24 crc kubenswrapper[5055]: E1011 08:43:24.023105 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:43:34 crc kubenswrapper[5055]: I1011 08:43:34.993970 5055 
scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:43:34 crc kubenswrapper[5055]: E1011 08:43:34.995360 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:43:46 crc kubenswrapper[5055]: I1011 08:43:46.997474 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:43:46 crc kubenswrapper[5055]: E1011 08:43:46.998257 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.840146 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:43:50 crc kubenswrapper[5055]: E1011 08:43:50.840972 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="registry-server" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.840993 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="registry-server" Oct 11 08:43:50 crc kubenswrapper[5055]: E1011 08:43:50.841043 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="extract-utilities" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.841056 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="extract-utilities" Oct 11 08:43:50 crc kubenswrapper[5055]: E1011 08:43:50.841071 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="extract-content" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.841083 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="extract-content" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.841373 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="7490ab1c-cc9a-4d43-b2ad-f9a27f876865" containerName="registry-server" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.843293 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.848466 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.985706 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.985783 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:50 crc kubenswrapper[5055]: I1011 08:43:50.985805 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpkq6\" (UniqueName: \"kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.087676 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.087727 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpkq6\" (UniqueName: \"kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.087866 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.088508 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.088744 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.109168 5055 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kpkq6\" (UniqueName: \"kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6\") pod \"community-operators-vfxmb\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.178648 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:43:51 crc kubenswrapper[5055]: I1011 08:43:51.736001 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:43:52 crc kubenswrapper[5055]: I1011 08:43:52.236705 5055 generic.go:334] "Generic (PLEG): container finished" podID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerID="3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984" exitCode=0 Oct 11 08:43:52 crc kubenswrapper[5055]: I1011 08:43:52.236752 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerDied","Data":"3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984"} Oct 11 08:43:52 crc kubenswrapper[5055]: I1011 08:43:52.236798 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerStarted","Data":"068746695a224006011787a939dfbb9d3c69f02343a81a758d163c40b8135f56"} Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.245692 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerStarted","Data":"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a"} Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.428479 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.429936 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.440272 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.623156 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.623216 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.623269 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll5kf\" (UniqueName: \"kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.724379 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll5kf\" (UniqueName: \"kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.724522 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.724553 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.725058 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.725089 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.745608 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ll5kf\" (UniqueName: \"kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf\") pod \"redhat-marketplace-sxbpf\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:53 crc kubenswrapper[5055]: I1011 08:43:53.758233 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.006617 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:43:54 crc kubenswrapper[5055]: W1011 08:43:54.007364 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26f86d9f_3915_4fc5_97eb_f0c93cb62133.slice/crio-d2ebc70cab42b6a0f2a64b39a5c822330fa7d9838504144463db60891fd7d9e0 WatchSource:0}: Error finding container d2ebc70cab42b6a0f2a64b39a5c822330fa7d9838504144463db60891fd7d9e0: Status 404 returned error can't find the container with id d2ebc70cab42b6a0f2a64b39a5c822330fa7d9838504144463db60891fd7d9e0 Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.254208 5055 generic.go:334] "Generic (PLEG): container finished" podID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerID="d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a" exitCode=0 Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.254280 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerDied","Data":"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a"} Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.256235 5055 generic.go:334] "Generic (PLEG): container finished" podID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerID="2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785" exitCode=0 Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.256268 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerDied","Data":"2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785"} Oct 11 08:43:54 crc kubenswrapper[5055]: I1011 08:43:54.256291 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerStarted","Data":"d2ebc70cab42b6a0f2a64b39a5c822330fa7d9838504144463db60891fd7d9e0"} Oct 11 08:43:55 crc kubenswrapper[5055]: I1011 08:43:55.269613 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerStarted","Data":"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5"} Oct 11 08:43:55 crc kubenswrapper[5055]: I1011 08:43:55.272810 5055 generic.go:334] "Generic (PLEG): container finished" podID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerID="bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3" exitCode=0 Oct 11 08:43:55 crc kubenswrapper[5055]: I1011 08:43:55.272859 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" 
event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerDied","Data":"bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3"} Oct 11 08:43:55 crc kubenswrapper[5055]: I1011 08:43:55.293508 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vfxmb" podStartSLOduration=2.7480558630000003 podStartE2EDuration="5.29347622s" podCreationTimestamp="2025-10-11 08:43:50 +0000 UTC" firstStartedPulling="2025-10-11 08:43:52.238249501 +0000 UTC m=+6616.012523308" lastFinishedPulling="2025-10-11 08:43:54.783669818 +0000 UTC m=+6618.557943665" observedRunningTime="2025-10-11 08:43:55.289378373 +0000 UTC m=+6619.063652200" watchObservedRunningTime="2025-10-11 08:43:55.29347622 +0000 UTC m=+6619.067750067" Oct 11 08:43:56 crc kubenswrapper[5055]: I1011 08:43:56.281090 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerStarted","Data":"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424"} Oct 11 08:43:56 crc kubenswrapper[5055]: I1011 08:43:56.333345 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sxbpf" podStartSLOduration=1.6643494049999998 podStartE2EDuration="3.333328149s" podCreationTimestamp="2025-10-11 08:43:53 +0000 UTC" firstStartedPulling="2025-10-11 08:43:54.257692426 +0000 UTC m=+6618.031966253" lastFinishedPulling="2025-10-11 08:43:55.92667117 +0000 UTC m=+6619.700944997" observedRunningTime="2025-10-11 08:43:56.326418793 +0000 UTC m=+6620.100692600" watchObservedRunningTime="2025-10-11 08:43:56.333328149 +0000 UTC m=+6620.107601956" Oct 11 08:43:59 crc kubenswrapper[5055]: I1011 08:43:59.994064 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:43:59 crc kubenswrapper[5055]: E1011 08:43:59.994571 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:44:01 crc kubenswrapper[5055]: I1011 08:44:01.179526 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:01 crc kubenswrapper[5055]: I1011 08:44:01.180437 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:01 crc kubenswrapper[5055]: I1011 08:44:01.257957 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:01 crc kubenswrapper[5055]: I1011 08:44:01.365956 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.033693 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.330572 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vfxmb" 
podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="registry-server" containerID="cri-o://4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5" gracePeriod=2 Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.758726 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.758793 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.801902 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.818662 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.974722 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content\") pod \"ab6bb86d-fc13-41ec-89bc-19906c68e290\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.974837 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpkq6\" (UniqueName: \"kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6\") pod \"ab6bb86d-fc13-41ec-89bc-19906c68e290\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.974947 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities\") pod \"ab6bb86d-fc13-41ec-89bc-19906c68e290\" (UID: \"ab6bb86d-fc13-41ec-89bc-19906c68e290\") " Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.976195 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities" (OuterVolumeSpecName: "utilities") pod "ab6bb86d-fc13-41ec-89bc-19906c68e290" (UID: "ab6bb86d-fc13-41ec-89bc-19906c68e290"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:44:03 crc kubenswrapper[5055]: I1011 08:44:03.984379 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6" (OuterVolumeSpecName: "kube-api-access-kpkq6") pod "ab6bb86d-fc13-41ec-89bc-19906c68e290" (UID: "ab6bb86d-fc13-41ec-89bc-19906c68e290"). InnerVolumeSpecName "kube-api-access-kpkq6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.017941 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab6bb86d-fc13-41ec-89bc-19906c68e290" (UID: "ab6bb86d-fc13-41ec-89bc-19906c68e290"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.076618 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpkq6\" (UniqueName: \"kubernetes.io/projected/ab6bb86d-fc13-41ec-89bc-19906c68e290-kube-api-access-kpkq6\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.076660 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.076676 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab6bb86d-fc13-41ec-89bc-19906c68e290-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.343239 5055 generic.go:334] "Generic (PLEG): container finished" podID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerID="4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5" exitCode=0 Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.343350 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerDied","Data":"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5"} Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.343432 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfxmb" event={"ID":"ab6bb86d-fc13-41ec-89bc-19906c68e290","Type":"ContainerDied","Data":"068746695a224006011787a939dfbb9d3c69f02343a81a758d163c40b8135f56"} Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.343473 5055 scope.go:117] "RemoveContainer" containerID="4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.343380 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vfxmb" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.376338 5055 scope.go:117] "RemoveContainer" containerID="d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.425547 5055 scope.go:117] "RemoveContainer" containerID="3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.427152 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.428658 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.436722 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vfxmb"] Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.474027 5055 scope.go:117] "RemoveContainer" containerID="4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5" Oct 11 08:44:04 crc kubenswrapper[5055]: E1011 08:44:04.474492 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5\": container with ID starting with 4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5 not found: ID does not exist" containerID="4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.474576 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5"} err="failed to get container status \"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5\": rpc error: code = NotFound desc = could not find container \"4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5\": container with ID starting with 4c594ee0423b898729ef632cc7c1a872a6f7177f8756a4b9f1986f624810fcb5 not found: ID does not exist" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.474643 5055 scope.go:117] "RemoveContainer" containerID="d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a" Oct 11 08:44:04 crc kubenswrapper[5055]: E1011 08:44:04.475208 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a\": container with ID starting with d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a not found: ID does not exist" containerID="d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.475254 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a"} err="failed to get container status \"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a\": rpc error: code = NotFound desc = could not find container \"d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a\": container with ID starting with d714e2e360e8d31afd5ef89b584856aed78da656a5751e004fac0c0dd724e54a not found: ID does not exist" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.475283 5055 scope.go:117] "RemoveContainer" 
containerID="3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984" Oct 11 08:44:04 crc kubenswrapper[5055]: E1011 08:44:04.475585 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984\": container with ID starting with 3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984 not found: ID does not exist" containerID="3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984" Oct 11 08:44:04 crc kubenswrapper[5055]: I1011 08:44:04.475671 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984"} err="failed to get container status \"3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984\": rpc error: code = NotFound desc = could not find container \"3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984\": container with ID starting with 3c2bad7d73a8a0651a980ded3026f212603b1e20002952461524f1317d81f984 not found: ID does not exist" Oct 11 08:44:05 crc kubenswrapper[5055]: I1011 08:44:05.013501 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" path="/var/lib/kubelet/pods/ab6bb86d-fc13-41ec-89bc-19906c68e290/volumes" Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.228173 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.364645 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sxbpf" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="registry-server" containerID="cri-o://250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424" gracePeriod=2 Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.769849 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.918852 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll5kf\" (UniqueName: \"kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf\") pod \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.918958 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content\") pod \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.919008 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities\") pod \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\" (UID: \"26f86d9f-3915-4fc5-97eb-f0c93cb62133\") " Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.920018 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities" (OuterVolumeSpecName: "utilities") pod "26f86d9f-3915-4fc5-97eb-f0c93cb62133" (UID: "26f86d9f-3915-4fc5-97eb-f0c93cb62133"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.927452 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf" (OuterVolumeSpecName: "kube-api-access-ll5kf") pod "26f86d9f-3915-4fc5-97eb-f0c93cb62133" (UID: "26f86d9f-3915-4fc5-97eb-f0c93cb62133"). InnerVolumeSpecName "kube-api-access-ll5kf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:44:06 crc kubenswrapper[5055]: I1011 08:44:06.938713 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26f86d9f-3915-4fc5-97eb-f0c93cb62133" (UID: "26f86d9f-3915-4fc5-97eb-f0c93cb62133"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.021385 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll5kf\" (UniqueName: \"kubernetes.io/projected/26f86d9f-3915-4fc5-97eb-f0c93cb62133-kube-api-access-ll5kf\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.021463 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.021485 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26f86d9f-3915-4fc5-97eb-f0c93cb62133-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.372054 5055 generic.go:334] "Generic (PLEG): container finished" podID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerID="250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424" exitCode=0 Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.372094 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sxbpf" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.372105 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerDied","Data":"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424"} Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.372171 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sxbpf" event={"ID":"26f86d9f-3915-4fc5-97eb-f0c93cb62133","Type":"ContainerDied","Data":"d2ebc70cab42b6a0f2a64b39a5c822330fa7d9838504144463db60891fd7d9e0"} Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.372193 5055 scope.go:117] "RemoveContainer" containerID="250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.387153 5055 scope.go:117] "RemoveContainer" containerID="bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.404675 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.410451 5055 scope.go:117] "RemoveContainer" containerID="2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.414580 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sxbpf"] Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.425047 5055 scope.go:117] "RemoveContainer" containerID="250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424" Oct 11 08:44:07 crc kubenswrapper[5055]: E1011 08:44:07.425452 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424\": container with ID starting with 250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424 not found: ID does not exist" containerID="250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.425492 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424"} err="failed to get container status \"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424\": rpc error: code = NotFound desc = could not find container \"250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424\": container with ID starting with 250bd5b652b82fb2137ec67925ea090c2cb37999c015d41cabb02dced2d88424 not found: ID does not exist" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.425518 5055 scope.go:117] "RemoveContainer" containerID="bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3" Oct 11 08:44:07 crc kubenswrapper[5055]: E1011 08:44:07.425861 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3\": container with ID starting with bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3 not found: ID does not exist" containerID="bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.425908 5055 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3"} err="failed to get container status \"bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3\": rpc error: code = NotFound desc = could not find container \"bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3\": container with ID starting with bae6ca5730806bfeea44f794acdc2075235eb385ddc78c7840b23e3f923b82e3 not found: ID does not exist" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.425934 5055 scope.go:117] "RemoveContainer" containerID="2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785" Oct 11 08:44:07 crc kubenswrapper[5055]: E1011 08:44:07.426207 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785\": container with ID starting with 2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785 not found: ID does not exist" containerID="2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785" Oct 11 08:44:07 crc kubenswrapper[5055]: I1011 08:44:07.426233 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785"} err="failed to get container status \"2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785\": rpc error: code = NotFound desc = could not find container \"2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785\": container with ID starting with 2c67d532e1b9ad3665b233ee6a0279a1b500e02ce5cf2944c26b32979536e785 not found: ID does not exist" Oct 11 08:44:09 crc kubenswrapper[5055]: I1011 08:44:09.012114 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" path="/var/lib/kubelet/pods/26f86d9f-3915-4fc5-97eb-f0c93cb62133/volumes" Oct 11 08:44:10 crc kubenswrapper[5055]: I1011 08:44:10.994098 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:44:11 crc kubenswrapper[5055]: I1011 08:44:11.412755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816"} Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.202317 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps"] Oct 11 08:45:00 crc kubenswrapper[5055]: E1011 08:45:00.203286 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203302 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: E1011 08:45:00.203320 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="extract-utilities" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203328 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="extract-utilities" Oct 11 08:45:00 crc 
kubenswrapper[5055]: E1011 08:45:00.203343 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="extract-utilities" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203351 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="extract-utilities" Oct 11 08:45:00 crc kubenswrapper[5055]: E1011 08:45:00.203365 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203373 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: E1011 08:45:00.203386 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="extract-content" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203395 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="extract-content" Oct 11 08:45:00 crc kubenswrapper[5055]: E1011 08:45:00.203425 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="extract-content" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203433 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="extract-content" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203607 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="26f86d9f-3915-4fc5-97eb-f0c93cb62133" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.203634 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab6bb86d-fc13-41ec-89bc-19906c68e290" containerName="registry-server" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.204462 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.208331 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.209030 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.217992 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps"] Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.368040 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhpqm\" (UniqueName: \"kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.368134 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.368199 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.469918 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.471147 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.471203 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhpqm\" (UniqueName: \"kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.472195 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume\") pod 
\"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.479113 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.490665 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhpqm\" (UniqueName: \"kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm\") pod \"collect-profiles-29336205-pmnps\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.527458 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.776230 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps"] Oct 11 08:45:00 crc kubenswrapper[5055]: W1011 08:45:00.784952 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a637a5c_aa02_4559_adcb_38337449b1b1.slice/crio-e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6 WatchSource:0}: Error finding container e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6: Status 404 returned error can't find the container with id e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6 Oct 11 08:45:00 crc kubenswrapper[5055]: I1011 08:45:00.898814 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" event={"ID":"8a637a5c-aa02-4559-adcb-38337449b1b1","Type":"ContainerStarted","Data":"e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6"} Oct 11 08:45:01 crc kubenswrapper[5055]: I1011 08:45:01.907086 5055 generic.go:334] "Generic (PLEG): container finished" podID="8a637a5c-aa02-4559-adcb-38337449b1b1" containerID="68249bc287e6661f95e6626219531d203aab1ba14ae0a030e9d7205307948c4e" exitCode=0 Oct 11 08:45:01 crc kubenswrapper[5055]: I1011 08:45:01.907130 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" event={"ID":"8a637a5c-aa02-4559-adcb-38337449b1b1","Type":"ContainerDied","Data":"68249bc287e6661f95e6626219531d203aab1ba14ae0a030e9d7205307948c4e"} Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.257224 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.418818 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume\") pod \"8a637a5c-aa02-4559-adcb-38337449b1b1\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.418946 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhpqm\" (UniqueName: \"kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm\") pod \"8a637a5c-aa02-4559-adcb-38337449b1b1\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.419065 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume\") pod \"8a637a5c-aa02-4559-adcb-38337449b1b1\" (UID: \"8a637a5c-aa02-4559-adcb-38337449b1b1\") " Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.420086 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume" (OuterVolumeSpecName: "config-volume") pod "8a637a5c-aa02-4559-adcb-38337449b1b1" (UID: "8a637a5c-aa02-4559-adcb-38337449b1b1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.427100 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm" (OuterVolumeSpecName: "kube-api-access-vhpqm") pod "8a637a5c-aa02-4559-adcb-38337449b1b1" (UID: "8a637a5c-aa02-4559-adcb-38337449b1b1"). InnerVolumeSpecName "kube-api-access-vhpqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.427592 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8a637a5c-aa02-4559-adcb-38337449b1b1" (UID: "8a637a5c-aa02-4559-adcb-38337449b1b1"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.521850 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8a637a5c-aa02-4559-adcb-38337449b1b1-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.521941 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhpqm\" (UniqueName: \"kubernetes.io/projected/8a637a5c-aa02-4559-adcb-38337449b1b1-kube-api-access-vhpqm\") on node \"crc\" DevicePath \"\"" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.521960 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8a637a5c-aa02-4559-adcb-38337449b1b1-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.933668 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" event={"ID":"8a637a5c-aa02-4559-adcb-38337449b1b1","Type":"ContainerDied","Data":"e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6"} Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.933726 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e46be450623ec50dea265ef1b4b943e17e86530b8b009ffb8f58d279776af1e6" Oct 11 08:45:03 crc kubenswrapper[5055]: I1011 08:45:03.933838 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336205-pmnps" Oct 11 08:45:04 crc kubenswrapper[5055]: I1011 08:45:04.329414 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2"] Oct 11 08:45:04 crc kubenswrapper[5055]: I1011 08:45:04.335302 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336160-zdtp2"] Oct 11 08:45:05 crc kubenswrapper[5055]: I1011 08:45:05.006606 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac" path="/var/lib/kubelet/pods/38cb3c67-41a1-4c17-ab35-bfe6b8eb4aac/volumes" Oct 11 08:45:54 crc kubenswrapper[5055]: I1011 08:45:54.879302 5055 scope.go:117] "RemoveContainer" containerID="0a1b5bd95ead33c60c698b396fbc96742f6367a2d9b80f265308f54acf50c108" Oct 11 08:46:32 crc kubenswrapper[5055]: I1011 08:46:32.422539 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:46:32 crc kubenswrapper[5055]: I1011 08:46:32.423296 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:47:02 crc kubenswrapper[5055]: I1011 08:47:02.422903 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Oct 11 08:47:02 crc kubenswrapper[5055]: I1011 08:47:02.423676 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:47:32 crc kubenswrapper[5055]: I1011 08:47:32.422365 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:47:32 crc kubenswrapper[5055]: I1011 08:47:32.423242 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:47:32 crc kubenswrapper[5055]: I1011 08:47:32.425045 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:47:32 crc kubenswrapper[5055]: I1011 08:47:32.425961 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:47:32 crc kubenswrapper[5055]: I1011 08:47:32.426083 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816" gracePeriod=600 Oct 11 08:47:33 crc kubenswrapper[5055]: I1011 08:47:33.209656 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816" exitCode=0 Oct 11 08:47:33 crc kubenswrapper[5055]: I1011 08:47:33.209749 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816"} Oct 11 08:47:33 crc kubenswrapper[5055]: I1011 08:47:33.210243 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"} Oct 11 08:47:33 crc kubenswrapper[5055]: I1011 08:47:33.210288 5055 scope.go:117] "RemoveContainer" containerID="314db44ef87f74442ee289f2d1c19dae471b53a8758623c208f980223076fdf3" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.308809 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:37 crc kubenswrapper[5055]: E1011 08:47:37.309790 5055 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8a637a5c-aa02-4559-adcb-38337449b1b1" containerName="collect-profiles" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.309814 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a637a5c-aa02-4559-adcb-38337449b1b1" containerName="collect-profiles" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.310003 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a637a5c-aa02-4559-adcb-38337449b1b1" containerName="collect-profiles" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.311323 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.325681 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.506634 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.506989 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.507110 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9r2s\" (UniqueName: \"kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.607991 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.608317 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9r2s\" (UniqueName: \"kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.608488 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.608536 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.608957 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.628757 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9r2s\" (UniqueName: \"kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s\") pod \"certified-operators-tm76h\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.716371 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:37 crc kubenswrapper[5055]: I1011 08:47:37.982106 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:38 crc kubenswrapper[5055]: I1011 08:47:38.251511 5055 generic.go:334] "Generic (PLEG): container finished" podID="57458698-6363-4c34-8b76-51def2118722" containerID="ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66" exitCode=0 Oct 11 08:47:38 crc kubenswrapper[5055]: I1011 08:47:38.251699 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerDied","Data":"ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66"} Oct 11 08:47:38 crc kubenswrapper[5055]: I1011 08:47:38.251727 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerStarted","Data":"4012bdcd5854f3a0fc34e160aed3b9f9d5582c4ce9d58618f811661caadbc6ba"} Oct 11 08:47:38 crc kubenswrapper[5055]: I1011 08:47:38.253585 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:47:39 crc kubenswrapper[5055]: I1011 08:47:39.259929 5055 generic.go:334] "Generic (PLEG): container finished" podID="57458698-6363-4c34-8b76-51def2118722" containerID="aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe" exitCode=0 Oct 11 08:47:39 crc kubenswrapper[5055]: I1011 08:47:39.260010 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerDied","Data":"aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe"} Oct 11 08:47:40 crc kubenswrapper[5055]: I1011 08:47:40.271818 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerStarted","Data":"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923"} Oct 11 08:47:47 crc kubenswrapper[5055]: I1011 08:47:47.716711 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:47 crc 
kubenswrapper[5055]: I1011 08:47:47.718138 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:47 crc kubenswrapper[5055]: I1011 08:47:47.796239 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:47 crc kubenswrapper[5055]: I1011 08:47:47.833618 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tm76h" podStartSLOduration=9.374283645 podStartE2EDuration="10.833540867s" podCreationTimestamp="2025-10-11 08:47:37 +0000 UTC" firstStartedPulling="2025-10-11 08:47:38.253382375 +0000 UTC m=+6842.027656182" lastFinishedPulling="2025-10-11 08:47:39.712639587 +0000 UTC m=+6843.486913404" observedRunningTime="2025-10-11 08:47:40.294053544 +0000 UTC m=+6844.068327371" watchObservedRunningTime="2025-10-11 08:47:47.833540867 +0000 UTC m=+6851.607814724" Oct 11 08:47:48 crc kubenswrapper[5055]: I1011 08:47:48.391305 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:48 crc kubenswrapper[5055]: I1011 08:47:48.443120 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.353568 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tm76h" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="registry-server" containerID="cri-o://a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923" gracePeriod=2 Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.810387 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.996967 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9r2s\" (UniqueName: \"kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s\") pod \"57458698-6363-4c34-8b76-51def2118722\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.997140 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities\") pod \"57458698-6363-4c34-8b76-51def2118722\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.997849 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content\") pod \"57458698-6363-4c34-8b76-51def2118722\" (UID: \"57458698-6363-4c34-8b76-51def2118722\") " Oct 11 08:47:50 crc kubenswrapper[5055]: I1011 08:47:50.999113 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities" (OuterVolumeSpecName: "utilities") pod "57458698-6363-4c34-8b76-51def2118722" (UID: "57458698-6363-4c34-8b76-51def2118722"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.002048 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s" (OuterVolumeSpecName: "kube-api-access-v9r2s") pod "57458698-6363-4c34-8b76-51def2118722" (UID: "57458698-6363-4c34-8b76-51def2118722"). InnerVolumeSpecName "kube-api-access-v9r2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.004246 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9r2s\" (UniqueName: \"kubernetes.io/projected/57458698-6363-4c34-8b76-51def2118722-kube-api-access-v9r2s\") on node \"crc\" DevicePath \"\"" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.004305 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.039996 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57458698-6363-4c34-8b76-51def2118722" (UID: "57458698-6363-4c34-8b76-51def2118722"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.105808 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57458698-6363-4c34-8b76-51def2118722-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.369832 5055 generic.go:334] "Generic (PLEG): container finished" podID="57458698-6363-4c34-8b76-51def2118722" containerID="a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923" exitCode=0 Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.369989 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerDied","Data":"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923"} Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.370033 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tm76h" event={"ID":"57458698-6363-4c34-8b76-51def2118722","Type":"ContainerDied","Data":"4012bdcd5854f3a0fc34e160aed3b9f9d5582c4ce9d58618f811661caadbc6ba"} Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.370062 5055 scope.go:117] "RemoveContainer" containerID="a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.370237 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tm76h" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.399200 5055 scope.go:117] "RemoveContainer" containerID="aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.429264 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.436452 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tm76h"] Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.443514 5055 scope.go:117] "RemoveContainer" containerID="ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.466182 5055 scope.go:117] "RemoveContainer" containerID="a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923" Oct 11 08:47:51 crc kubenswrapper[5055]: E1011 08:47:51.466624 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923\": container with ID starting with a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923 not found: ID does not exist" containerID="a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.466668 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923"} err="failed to get container status \"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923\": rpc error: code = NotFound desc = could not find container \"a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923\": container with ID starting with a60ab2468ed7d7764d6569bcbdb9bdfede7761cf5f3bdd24b6b967559b56d923 not found: ID does not exist" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.466695 5055 scope.go:117] "RemoveContainer" containerID="aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe" Oct 11 08:47:51 crc kubenswrapper[5055]: E1011 08:47:51.467293 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe\": container with ID starting with aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe not found: ID does not exist" containerID="aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.467334 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe"} err="failed to get container status \"aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe\": rpc error: code = NotFound desc = could not find container \"aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe\": container with ID starting with aea35120508a1313fd474864532fc9684d824a25aaed4d1d829b85de1dff9fbe not found: ID does not exist" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.467365 5055 scope.go:117] "RemoveContainer" containerID="ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66" Oct 11 08:47:51 crc kubenswrapper[5055]: E1011 08:47:51.467791 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66\": container with ID starting with ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66 not found: ID does not exist" containerID="ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66" Oct 11 08:47:51 crc kubenswrapper[5055]: I1011 08:47:51.467815 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66"} err="failed to get container status \"ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66\": rpc error: code = NotFound desc = could not find container \"ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66\": container with ID starting with ca9080629372db21087c8e9f37d0dd0212280a4b4fd97dfabe7450ffa2494d66 not found: ID does not exist" Oct 11 08:47:53 crc kubenswrapper[5055]: I1011 08:47:53.010750 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57458698-6363-4c34-8b76-51def2118722" path="/var/lib/kubelet/pods/57458698-6363-4c34-8b76-51def2118722/volumes" Oct 11 08:49:32 crc kubenswrapper[5055]: I1011 08:49:32.422960 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:49:32 crc kubenswrapper[5055]: I1011 08:49:32.423588 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:50:02 crc kubenswrapper[5055]: I1011 08:50:02.422650 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:50:02 crc kubenswrapper[5055]: I1011 08:50:02.423105 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.421908 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.422503 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.422552 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.423137 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.423196 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" gracePeriod=600 Oct 11 08:50:32 crc kubenswrapper[5055]: E1011 08:50:32.551745 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.774431 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" exitCode=0 Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.774544 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"} Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.774678 5055 scope.go:117] "RemoveContainer" containerID="99c8434f0cc4fbf0485d3f54572fbd27fdb42980e02f46eecdbaaf2590abb816" Oct 11 08:50:32 crc kubenswrapper[5055]: I1011 08:50:32.775293 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:50:32 crc kubenswrapper[5055]: E1011 08:50:32.775699 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:50:43 crc kubenswrapper[5055]: I1011 08:50:43.993469 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:50:43 crc kubenswrapper[5055]: E1011 08:50:43.994444 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:50:57 crc 
kubenswrapper[5055]: I1011 08:50:57.994528 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:50:58 crc kubenswrapper[5055]: E1011 08:50:57.996531 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:51:10 crc kubenswrapper[5055]: I1011 08:51:10.993682 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:51:10 crc kubenswrapper[5055]: E1011 08:51:10.994228 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:51:25 crc kubenswrapper[5055]: I1011 08:51:25.994493 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:51:25 crc kubenswrapper[5055]: E1011 08:51:25.995701 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:51:39 crc kubenswrapper[5055]: I1011 08:51:38.993542 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:51:39 crc kubenswrapper[5055]: E1011 08:51:39.000631 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:51:51 crc kubenswrapper[5055]: I1011 08:51:51.993536 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:51:51 crc kubenswrapper[5055]: E1011 08:51:51.994533 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:52:05 crc kubenswrapper[5055]: I1011 08:52:05.993662 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:52:05 crc 
Oct 11 08:52:05 crc kubenswrapper[5055]: E1011 08:52:05.994699 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:52:20 crc kubenswrapper[5055]: I1011 08:52:20.993946 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:52:20 crc kubenswrapper[5055]: E1011 08:52:20.994998 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:52:33 crc kubenswrapper[5055]: I1011 08:52:33.994176 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:52:33 crc kubenswrapper[5055]: E1011 08:52:33.995342 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:52:44 crc kubenswrapper[5055]: I1011 08:52:44.994389 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:52:44 crc kubenswrapper[5055]: E1011 08:52:44.995071 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.176992 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"]
Oct 11 08:52:50 crc kubenswrapper[5055]: E1011 08:52:50.178299 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="registry-server"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.178325 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="registry-server"
Oct 11 08:52:50 crc kubenswrapper[5055]: E1011 08:52:50.178362 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="extract-utilities"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.178375 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="extract-utilities"
Oct 11 08:52:50 crc kubenswrapper[5055]: E1011 08:52:50.178407 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="extract-content"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.178419 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="extract-content"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.179054 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="57458698-6363-4c34-8b76-51def2118722" containerName="registry-server"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.184924 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.219621 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"]
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.240433 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.240475 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.240600 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scvml\" (UniqueName: \"kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.341846 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scvml\" (UniqueName: \"kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.341920 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.341959 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.342641 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.342760 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.366636 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scvml\" (UniqueName: \"kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml\") pod \"redhat-operators-87lcs\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") " pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.511164 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.774365 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"]
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.953576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerStarted","Data":"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74"}
Oct 11 08:52:50 crc kubenswrapper[5055]: I1011 08:52:50.953918 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerStarted","Data":"9b966ce3cd904722720b4c26eb7d983d57461e4b34ffdffda739458707525a97"}
Oct 11 08:52:51 crc kubenswrapper[5055]: I1011 08:52:51.966505 5055 generic.go:334] "Generic (PLEG): container finished" podID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerID="6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74" exitCode=0
Oct 11 08:52:51 crc kubenswrapper[5055]: I1011 08:52:51.966618 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerDied","Data":"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74"}
Oct 11 08:52:51 crc kubenswrapper[5055]: I1011 08:52:51.972379 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 11 08:52:52 crc kubenswrapper[5055]: I1011 08:52:52.976654 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerStarted","Data":"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"}
Oct 11 08:52:53 crc kubenswrapper[5055]: I1011 08:52:53.989003 5055 generic.go:334] "Generic (PLEG): container finished" podID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerID="3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8" exitCode=0
Oct 11 08:52:53 crc kubenswrapper[5055]: I1011 08:52:53.989094 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerDied","Data":"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"}
Oct 11 08:52:55 crc kubenswrapper[5055]: I1011 08:52:55.006393 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerStarted","Data":"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"}
Oct 11 08:52:55 crc kubenswrapper[5055]: I1011 08:52:55.075896 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-87lcs" podStartSLOduration=2.602295894 podStartE2EDuration="5.075866639s" podCreationTimestamp="2025-10-11 08:52:50 +0000 UTC" firstStartedPulling="2025-10-11 08:52:51.971936765 +0000 UTC m=+7155.746210572" lastFinishedPulling="2025-10-11 08:52:54.44550748 +0000 UTC m=+7158.219781317" observedRunningTime="2025-10-11 08:52:55.070508767 +0000 UTC m=+7158.844782594" watchObservedRunningTime="2025-10-11 08:52:55.075866639 +0000 UTC m=+7158.850140476"
Oct 11 08:52:59 crc kubenswrapper[5055]: I1011 08:52:59.994416 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:52:59 crc kubenswrapper[5055]: E1011 08:52:59.998855 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:53:00 crc kubenswrapper[5055]: I1011 08:53:00.512374 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:00 crc kubenswrapper[5055]: I1011 08:53:00.512416 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:00 crc kubenswrapper[5055]: I1011 08:53:00.587407 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:01 crc kubenswrapper[5055]: I1011 08:53:01.087924 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:01 crc kubenswrapper[5055]: I1011 08:53:01.141215 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"]
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.063863 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-87lcs" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="registry-server" containerID="cri-o://a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb" gracePeriod=2
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.451306 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.532688 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scvml\" (UniqueName: \"kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml\") pod \"0594f27e-2b69-4ace-a68c-d6aff9611851\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") "
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.532763 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content\") pod \"0594f27e-2b69-4ace-a68c-d6aff9611851\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") "
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.532885 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities\") pod \"0594f27e-2b69-4ace-a68c-d6aff9611851\" (UID: \"0594f27e-2b69-4ace-a68c-d6aff9611851\") "
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.533698 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities" (OuterVolumeSpecName: "utilities") pod "0594f27e-2b69-4ace-a68c-d6aff9611851" (UID: "0594f27e-2b69-4ace-a68c-d6aff9611851"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.542705 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml" (OuterVolumeSpecName: "kube-api-access-scvml") pod "0594f27e-2b69-4ace-a68c-d6aff9611851" (UID: "0594f27e-2b69-4ace-a68c-d6aff9611851"). InnerVolumeSpecName "kube-api-access-scvml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.635016 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scvml\" (UniqueName: \"kubernetes.io/projected/0594f27e-2b69-4ace-a68c-d6aff9611851-kube-api-access-scvml\") on node \"crc\" DevicePath \"\""
Oct 11 08:53:03 crc kubenswrapper[5055]: I1011 08:53:03.635049 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-utilities\") on node \"crc\" DevicePath \"\""
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.079181 5055 generic.go:334] "Generic (PLEG): container finished" podID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerID="a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb" exitCode=0
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.079260 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerDied","Data":"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"}
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.079311 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-87lcs"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.079340 5055 scope.go:117] "RemoveContainer" containerID="a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.079313 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-87lcs" event={"ID":"0594f27e-2b69-4ace-a68c-d6aff9611851","Type":"ContainerDied","Data":"9b966ce3cd904722720b4c26eb7d983d57461e4b34ffdffda739458707525a97"}
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.100032 5055 scope.go:117] "RemoveContainer" containerID="3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.127607 5055 scope.go:117] "RemoveContainer" containerID="6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.153015 5055 scope.go:117] "RemoveContainer" containerID="a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"
Oct 11 08:53:04 crc kubenswrapper[5055]: E1011 08:53:04.153685 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb\": container with ID starting with a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb not found: ID does not exist" containerID="a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.153756 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb"} err="failed to get container status \"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb\": rpc error: code = NotFound desc = could not find container \"a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb\": container with ID starting with a7fa640517ea4556703aaa92928e48547e2944e962fb8bba26c1921ebca020fb not found: ID does not exist"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.153819 5055 scope.go:117] "RemoveContainer" containerID="3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"
Oct 11 08:53:04 crc kubenswrapper[5055]: E1011 08:53:04.154260 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8\": container with ID starting with 3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8 not found: ID does not exist" containerID="3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"
Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.154303 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8"} err="failed to get container status \"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8\": rpc error: code = NotFound desc = could not find container \"3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8\": container with ID starting with 3be309747885cdbf47e504a7b4828612d18d3a47544ff66d5804e1df655c0ad8 not found: ID does not exist"
containerID="6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74" Oct 11 08:53:04 crc kubenswrapper[5055]: E1011 08:53:04.154700 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74\": container with ID starting with 6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74 not found: ID does not exist" containerID="6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74" Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.154755 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74"} err="failed to get container status \"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74\": rpc error: code = NotFound desc = could not find container \"6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74\": container with ID starting with 6793fb208732275202eed778f80f91a1b9ed5d70cccb310c310ee151f7f71a74 not found: ID does not exist" Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.486647 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0594f27e-2b69-4ace-a68c-d6aff9611851" (UID: "0594f27e-2b69-4ace-a68c-d6aff9611851"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.550686 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0594f27e-2b69-4ace-a68c-d6aff9611851-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.752903 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"] Oct 11 08:53:04 crc kubenswrapper[5055]: I1011 08:53:04.766874 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-87lcs"] Oct 11 08:53:05 crc kubenswrapper[5055]: I1011 08:53:05.008666 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" path="/var/lib/kubelet/pods/0594f27e-2b69-4ace-a68c-d6aff9611851/volumes" Oct 11 08:53:10 crc kubenswrapper[5055]: I1011 08:53:10.994211 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:53:10 crc kubenswrapper[5055]: E1011 08:53:10.995251 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:53:25 crc kubenswrapper[5055]: I1011 08:53:25.993667 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:53:25 crc kubenswrapper[5055]: E1011 08:53:25.994576 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:53:38 crc kubenswrapper[5055]: I1011 08:53:38.993298 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:53:38 crc kubenswrapper[5055]: E1011 08:53:38.994199 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:53:53 crc kubenswrapper[5055]: I1011 08:53:53.993685 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:53:53 crc kubenswrapper[5055]: E1011 08:53:53.994515 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:54:08 crc kubenswrapper[5055]: I1011 08:54:08.994222 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:54:08 crc kubenswrapper[5055]: E1011 08:54:08.996487 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.208918 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mlmk9"] Oct 11 08:54:13 crc kubenswrapper[5055]: E1011 08:54:13.209210 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="registry-server" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.209223 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="registry-server" Oct 11 08:54:13 crc kubenswrapper[5055]: E1011 08:54:13.209263 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="extract-utilities" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.209270 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="extract-utilities" Oct 11 08:54:13 crc kubenswrapper[5055]: E1011 08:54:13.209282 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="extract-content" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.209289 5055 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="extract-content" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.209428 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="0594f27e-2b69-4ace-a68c-d6aff9611851" containerName="registry-server" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.211943 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.222485 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mlmk9"] Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.249595 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.249646 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2scsg\" (UniqueName: \"kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.249887 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.351566 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.351629 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2scsg\" (UniqueName: \"kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.351709 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.352553 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9" Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.352645 5055 
Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.352645 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.373124 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2scsg\" (UniqueName: \"kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg\") pod \"community-operators-mlmk9\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") " pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:13 crc kubenswrapper[5055]: I1011 08:54:13.542076 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:14 crc kubenswrapper[5055]: I1011 08:54:14.025706 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mlmk9"]
Oct 11 08:54:14 crc kubenswrapper[5055]: I1011 08:54:14.682521 5055 generic.go:334] "Generic (PLEG): container finished" podID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerID="71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532" exitCode=0
Oct 11 08:54:14 crc kubenswrapper[5055]: I1011 08:54:14.682576 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerDied","Data":"71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532"}
Oct 11 08:54:14 crc kubenswrapper[5055]: I1011 08:54:14.682761 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerStarted","Data":"44a10d60dbef6cce6de0fe965f6b106a68074019d76930ded9348eafbf12562f"}
Oct 11 08:54:15 crc kubenswrapper[5055]: I1011 08:54:15.707175 5055 generic.go:334] "Generic (PLEG): container finished" podID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerID="25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56" exitCode=0
Oct 11 08:54:15 crc kubenswrapper[5055]: I1011 08:54:15.707545 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerDied","Data":"25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56"}
Oct 11 08:54:16 crc kubenswrapper[5055]: I1011 08:54:16.717292 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerStarted","Data":"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd"}
Oct 11 08:54:16 crc kubenswrapper[5055]: I1011 08:54:16.737570 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mlmk9" podStartSLOduration=2.317756686 podStartE2EDuration="3.737556416s" podCreationTimestamp="2025-10-11 08:54:13 +0000 UTC" firstStartedPulling="2025-10-11 08:54:14.684751053 +0000 UTC m=+7238.459024870" lastFinishedPulling="2025-10-11 08:54:16.104550773 +0000 UTC m=+7239.878824600" observedRunningTime="2025-10-11 08:54:16.736060644 +0000 UTC m=+7240.510334441" watchObservedRunningTime="2025-10-11 08:54:16.737556416 +0000 UTC m=+7240.511830223"
Oct 11 08:54:19 crc kubenswrapper[5055]: I1011 08:54:19.993373 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:54:19 crc kubenswrapper[5055]: E1011 08:54:19.994091 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:54:23 crc kubenswrapper[5055]: I1011 08:54:23.542901 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:23 crc kubenswrapper[5055]: I1011 08:54:23.543341 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:23 crc kubenswrapper[5055]: I1011 08:54:23.606221 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:23 crc kubenswrapper[5055]: I1011 08:54:23.819623 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:23 crc kubenswrapper[5055]: I1011 08:54:23.873372 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mlmk9"]
Oct 11 08:54:25 crc kubenswrapper[5055]: I1011 08:54:25.784873 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mlmk9" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="registry-server" containerID="cri-o://b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd" gracePeriod=2
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.738797 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.794123 5055 generic.go:334] "Generic (PLEG): container finished" podID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerID="b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd" exitCode=0
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.794165 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerDied","Data":"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd"}
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.794213 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mlmk9" event={"ID":"808c65ca-48d2-4b47-a73a-14504bf3a4fe","Type":"ContainerDied","Data":"44a10d60dbef6cce6de0fe965f6b106a68074019d76930ded9348eafbf12562f"}
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.794231 5055 scope.go:117] "RemoveContainer" containerID="b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd"
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.794288 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mlmk9"
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.812571 5055 scope.go:117] "RemoveContainer" containerID="25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56"
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.833631 5055 scope.go:117] "RemoveContainer" containerID="71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532"
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.850094 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content\") pod \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") "
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.850148 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities\") pod \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") "
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.850198 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2scsg\" (UniqueName: \"kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg\") pod \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\" (UID: \"808c65ca-48d2-4b47-a73a-14504bf3a4fe\") "
Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.851258 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities" (OuterVolumeSpecName: "utilities") pod "808c65ca-48d2-4b47-a73a-14504bf3a4fe" (UID: "808c65ca-48d2-4b47-a73a-14504bf3a4fe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.854321 5055 scope.go:117] "RemoveContainer" containerID="b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd" Oct 11 08:54:26 crc kubenswrapper[5055]: E1011 08:54:26.855788 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd\": container with ID starting with b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd not found: ID does not exist" containerID="b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.855822 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd"} err="failed to get container status \"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd\": rpc error: code = NotFound desc = could not find container \"b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd\": container with ID starting with b28c838056896371bfed7f7ff19fbc26bbceaa66e3c572bdf6e7d6f6ba438ecd not found: ID does not exist" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.855843 5055 scope.go:117] "RemoveContainer" containerID="25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56" Oct 11 08:54:26 crc kubenswrapper[5055]: E1011 08:54:26.856150 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56\": container with ID starting with 25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56 not found: ID does not exist" containerID="25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.856185 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56"} err="failed to get container status \"25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56\": rpc error: code = NotFound desc = could not find container \"25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56\": container with ID starting with 25cb794d672fcaea4e51800ccd72e9a4b55865d4b4194b456ccc5c275b5bca56 not found: ID does not exist" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.856205 5055 scope.go:117] "RemoveContainer" containerID="71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532" Oct 11 08:54:26 crc kubenswrapper[5055]: E1011 08:54:26.856435 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532\": container with ID starting with 71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532 not found: ID does not exist" containerID="71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.856461 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532"} err="failed to get container status \"71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532\": rpc error: code = NotFound desc = could not 
find container \"71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532\": container with ID starting with 71a3eeb2af0dc64ffa2f835ded6291b9554f542cab148ff3681d3f45dab8e532 not found: ID does not exist" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.856862 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg" (OuterVolumeSpecName: "kube-api-access-2scsg") pod "808c65ca-48d2-4b47-a73a-14504bf3a4fe" (UID: "808c65ca-48d2-4b47-a73a-14504bf3a4fe"). InnerVolumeSpecName "kube-api-access-2scsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.904521 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "808c65ca-48d2-4b47-a73a-14504bf3a4fe" (UID: "808c65ca-48d2-4b47-a73a-14504bf3a4fe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.951383 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.951412 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2scsg\" (UniqueName: \"kubernetes.io/projected/808c65ca-48d2-4b47-a73a-14504bf3a4fe-kube-api-access-2scsg\") on node \"crc\" DevicePath \"\"" Oct 11 08:54:26 crc kubenswrapper[5055]: I1011 08:54:26.951423 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/808c65ca-48d2-4b47-a73a-14504bf3a4fe-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:54:27 crc kubenswrapper[5055]: I1011 08:54:27.113972 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mlmk9"] Oct 11 08:54:27 crc kubenswrapper[5055]: I1011 08:54:27.118612 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mlmk9"] Oct 11 08:54:29 crc kubenswrapper[5055]: I1011 08:54:29.007456 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" path="/var/lib/kubelet/pods/808c65ca-48d2-4b47-a73a-14504bf3a4fe/volumes" Oct 11 08:54:34 crc kubenswrapper[5055]: I1011 08:54:34.993971 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:54:34 crc kubenswrapper[5055]: E1011 08:54:34.994910 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:54:47 crc kubenswrapper[5055]: I1011 08:54:47.005003 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:54:47 crc kubenswrapper[5055]: E1011 08:54:47.005839 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:54:57 crc kubenswrapper[5055]: I1011 08:54:57.994036 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:54:57 crc kubenswrapper[5055]: E1011 08:54:57.995048 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.293912 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"] Oct 11 08:55:07 crc kubenswrapper[5055]: E1011 08:55:07.294820 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="registry-server" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.294836 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="registry-server" Oct 11 08:55:07 crc kubenswrapper[5055]: E1011 08:55:07.294847 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="extract-utilities" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.294853 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="extract-utilities" Oct 11 08:55:07 crc kubenswrapper[5055]: E1011 08:55:07.294868 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="extract-content" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.294876 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="extract-content" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.295022 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="808c65ca-48d2-4b47-a73a-14504bf3a4fe" containerName="registry-server" Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.295988 5055 util.go:30] "No sandbox for pod can be found. 
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.306749 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"]
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.439640 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84j2k\" (UniqueName: \"kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.439733 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.439760 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.540993 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84j2k\" (UniqueName: \"kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.541075 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.541109 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.541757 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.541822 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.566087 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84j2k\" (UniqueName: \"kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k\") pod \"redhat-marketplace-z5mwz\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") " pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.613879 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:07 crc kubenswrapper[5055]: I1011 08:55:07.840264 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"]
Oct 11 08:55:08 crc kubenswrapper[5055]: I1011 08:55:08.136444 5055 generic.go:334] "Generic (PLEG): container finished" podID="9da159cd-b657-42ff-baea-49cf11837086" containerID="019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f" exitCode=0
Oct 11 08:55:08 crc kubenswrapper[5055]: I1011 08:55:08.136755 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerDied","Data":"019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f"}
Oct 11 08:55:08 crc kubenswrapper[5055]: I1011 08:55:08.136825 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerStarted","Data":"2cc49650af4910c8fc570ac558b45bfb20923e3db5679a43bcff6cea60e4adfc"}
Oct 11 08:55:09 crc kubenswrapper[5055]: I1011 08:55:09.150343 5055 generic.go:334] "Generic (PLEG): container finished" podID="9da159cd-b657-42ff-baea-49cf11837086" containerID="a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47" exitCode=0
Oct 11 08:55:09 crc kubenswrapper[5055]: I1011 08:55:09.150401 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerDied","Data":"a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47"}
Oct 11 08:55:10 crc kubenswrapper[5055]: I1011 08:55:10.159373 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerStarted","Data":"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5"}
Oct 11 08:55:10 crc kubenswrapper[5055]: I1011 08:55:10.186033 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z5mwz" podStartSLOduration=1.605021775 podStartE2EDuration="3.185996697s" podCreationTimestamp="2025-10-11 08:55:07 +0000 UTC" firstStartedPulling="2025-10-11 08:55:08.139350159 +0000 UTC m=+7291.913623966" lastFinishedPulling="2025-10-11 08:55:09.720325041 +0000 UTC m=+7293.494598888" observedRunningTime="2025-10-11 08:55:10.185145803 +0000 UTC m=+7293.959419620" watchObservedRunningTime="2025-10-11 08:55:10.185996697 +0000 UTC m=+7293.960270544"
Oct 11 08:55:10 crc kubenswrapper[5055]: I1011 08:55:10.999529 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d"
Oct 11 08:55:11 crc kubenswrapper[5055]: E1011 08:55:11.000075 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1"
Oct 11 08:55:17 crc kubenswrapper[5055]: I1011 08:55:17.614225 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:17 crc kubenswrapper[5055]: I1011 08:55:17.614975 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:17 crc kubenswrapper[5055]: I1011 08:55:17.683685 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:18 crc kubenswrapper[5055]: I1011 08:55:18.290787 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:18 crc kubenswrapper[5055]: I1011 08:55:18.347997 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"]
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.245884 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z5mwz" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="registry-server" containerID="cri-o://be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5" gracePeriod=2
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.721179 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z5mwz"
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.742899 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84j2k\" (UniqueName: \"kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k\") pod \"9da159cd-b657-42ff-baea-49cf11837086\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") "
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.743087 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content\") pod \"9da159cd-b657-42ff-baea-49cf11837086\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") "
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.743198 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities\") pod \"9da159cd-b657-42ff-baea-49cf11837086\" (UID: \"9da159cd-b657-42ff-baea-49cf11837086\") "
Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.744883 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities" (OuterVolumeSpecName: "utilities") pod "9da159cd-b657-42ff-baea-49cf11837086" (UID: "9da159cd-b657-42ff-baea-49cf11837086"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.752053 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k" (OuterVolumeSpecName: "kube-api-access-84j2k") pod "9da159cd-b657-42ff-baea-49cf11837086" (UID: "9da159cd-b657-42ff-baea-49cf11837086"). InnerVolumeSpecName "kube-api-access-84j2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.776946 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9da159cd-b657-42ff-baea-49cf11837086" (UID: "9da159cd-b657-42ff-baea-49cf11837086"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.844709 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84j2k\" (UniqueName: \"kubernetes.io/projected/9da159cd-b657-42ff-baea-49cf11837086-kube-api-access-84j2k\") on node \"crc\" DevicePath \"\"" Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.844795 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:55:20 crc kubenswrapper[5055]: I1011 08:55:20.844816 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9da159cd-b657-42ff-baea-49cf11837086-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.266560 5055 generic.go:334] "Generic (PLEG): container finished" podID="9da159cd-b657-42ff-baea-49cf11837086" containerID="be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5" exitCode=0 Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.266625 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerDied","Data":"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5"} Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.266663 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z5mwz" event={"ID":"9da159cd-b657-42ff-baea-49cf11837086","Type":"ContainerDied","Data":"2cc49650af4910c8fc570ac558b45bfb20923e3db5679a43bcff6cea60e4adfc"} Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.266691 5055 scope.go:117] "RemoveContainer" containerID="be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.266947 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z5mwz" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.299048 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"] Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.304030 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z5mwz"] Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.310407 5055 scope.go:117] "RemoveContainer" containerID="a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.337782 5055 scope.go:117] "RemoveContainer" containerID="019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.364883 5055 scope.go:117] "RemoveContainer" containerID="be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5" Oct 11 08:55:21 crc kubenswrapper[5055]: E1011 08:55:21.365396 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5\": container with ID starting with be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5 not found: ID does not exist" containerID="be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.365451 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5"} err="failed to get container status \"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5\": rpc error: code = NotFound desc = could not find container \"be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5\": container with ID starting with be81eda1533e83ac677a95ca49f0bb664b09f697591050578e29e801e01ba5a5 not found: ID does not exist" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.365483 5055 scope.go:117] "RemoveContainer" containerID="a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47" Oct 11 08:55:21 crc kubenswrapper[5055]: E1011 08:55:21.365945 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47\": container with ID starting with a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47 not found: ID does not exist" containerID="a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.365979 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47"} err="failed to get container status \"a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47\": rpc error: code = NotFound desc = could not find container \"a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47\": container with ID starting with a20585fea6b50783567e0f5e8f82eae2e63e723b10b413867739222cdd6dff47 not found: ID does not exist" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.365997 5055 scope.go:117] "RemoveContainer" containerID="019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f" Oct 11 08:55:21 crc kubenswrapper[5055]: E1011 08:55:21.366340 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f\": container with ID starting with 019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f not found: ID does not exist" containerID="019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.366379 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f"} err="failed to get container status \"019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f\": rpc error: code = NotFound desc = could not find container \"019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f\": container with ID starting with 019699e2754422805412b0015929b17451a2a0aeeda9fb6699d1a544ec87017f not found: ID does not exist" Oct 11 08:55:21 crc kubenswrapper[5055]: I1011 08:55:21.993637 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:55:21 crc kubenswrapper[5055]: E1011 08:55:21.994627 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 08:55:23 crc kubenswrapper[5055]: I1011 08:55:23.014676 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9da159cd-b657-42ff-baea-49cf11837086" path="/var/lib/kubelet/pods/9da159cd-b657-42ff-baea-49cf11837086/volumes" Oct 11 08:55:35 crc kubenswrapper[5055]: I1011 08:55:35.993679 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 08:55:36 crc kubenswrapper[5055]: I1011 08:55:36.419013 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de"} Oct 11 08:58:02 crc kubenswrapper[5055]: I1011 08:58:02.422205 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:58:02 crc kubenswrapper[5055]: I1011 08:58:02.423221 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.142983 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:27 crc kubenswrapper[5055]: E1011 08:58:27.144216 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="registry-server" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 
08:58:27.144233 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="registry-server" Oct 11 08:58:27 crc kubenswrapper[5055]: E1011 08:58:27.144248 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="extract-utilities" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.144257 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="extract-utilities" Oct 11 08:58:27 crc kubenswrapper[5055]: E1011 08:58:27.144293 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="extract-content" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.144302 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="extract-content" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.144499 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="9da159cd-b657-42ff-baea-49cf11837086" containerName="registry-server" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.149301 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.169992 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.325007 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqm7w\" (UniqueName: \"kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.325092 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.325122 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.426304 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqm7w\" (UniqueName: \"kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.426398 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " 
pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.426427 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.427366 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.427381 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.450862 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqm7w\" (UniqueName: \"kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w\") pod \"certified-operators-q5tk2\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:27 crc kubenswrapper[5055]: I1011 08:58:27.505021 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:28 crc kubenswrapper[5055]: I1011 08:58:28.044972 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:28 crc kubenswrapper[5055]: I1011 08:58:28.902686 5055 generic.go:334] "Generic (PLEG): container finished" podID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerID="7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9" exitCode=0 Oct 11 08:58:28 crc kubenswrapper[5055]: I1011 08:58:28.902847 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerDied","Data":"7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9"} Oct 11 08:58:28 crc kubenswrapper[5055]: I1011 08:58:28.905231 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerStarted","Data":"e2cff51593048d3dda592a2ab5880af8eb53e85704e22bcf5b3a51653d7f6b6a"} Oct 11 08:58:28 crc kubenswrapper[5055]: I1011 08:58:28.906855 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 08:58:30 crc kubenswrapper[5055]: I1011 08:58:30.920468 5055 generic.go:334] "Generic (PLEG): container finished" podID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerID="6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7" exitCode=0 Oct 11 08:58:30 crc kubenswrapper[5055]: I1011 08:58:30.920586 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" 
event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerDied","Data":"6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7"} Oct 11 08:58:31 crc kubenswrapper[5055]: I1011 08:58:31.931952 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerStarted","Data":"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758"} Oct 11 08:58:31 crc kubenswrapper[5055]: I1011 08:58:31.953262 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q5tk2" podStartSLOduration=2.487019172 podStartE2EDuration="4.953240488s" podCreationTimestamp="2025-10-11 08:58:27 +0000 UTC" firstStartedPulling="2025-10-11 08:58:28.906130515 +0000 UTC m=+7492.680404352" lastFinishedPulling="2025-10-11 08:58:31.372351851 +0000 UTC m=+7495.146625668" observedRunningTime="2025-10-11 08:58:31.951792207 +0000 UTC m=+7495.726066054" watchObservedRunningTime="2025-10-11 08:58:31.953240488 +0000 UTC m=+7495.727514305" Oct 11 08:58:32 crc kubenswrapper[5055]: I1011 08:58:32.421975 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:58:32 crc kubenswrapper[5055]: I1011 08:58:32.422030 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:58:37 crc kubenswrapper[5055]: I1011 08:58:37.505341 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:37 crc kubenswrapper[5055]: I1011 08:58:37.506147 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:37 crc kubenswrapper[5055]: I1011 08:58:37.576687 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:38 crc kubenswrapper[5055]: I1011 08:58:38.025577 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:38 crc kubenswrapper[5055]: I1011 08:58:38.075318 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:39 crc kubenswrapper[5055]: I1011 08:58:39.993390 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-q5tk2" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="registry-server" containerID="cri-o://7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758" gracePeriod=2 Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.424858 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.566278 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content\") pod \"c96c1983-e09a-4afe-93ee-16ad11df114a\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.566326 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqm7w\" (UniqueName: \"kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w\") pod \"c96c1983-e09a-4afe-93ee-16ad11df114a\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.566386 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities\") pod \"c96c1983-e09a-4afe-93ee-16ad11df114a\" (UID: \"c96c1983-e09a-4afe-93ee-16ad11df114a\") " Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.567673 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities" (OuterVolumeSpecName: "utilities") pod "c96c1983-e09a-4afe-93ee-16ad11df114a" (UID: "c96c1983-e09a-4afe-93ee-16ad11df114a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.573304 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w" (OuterVolumeSpecName: "kube-api-access-bqm7w") pod "c96c1983-e09a-4afe-93ee-16ad11df114a" (UID: "c96c1983-e09a-4afe-93ee-16ad11df114a"). InnerVolumeSpecName "kube-api-access-bqm7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.609280 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c96c1983-e09a-4afe-93ee-16ad11df114a" (UID: "c96c1983-e09a-4afe-93ee-16ad11df114a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.667970 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.668005 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqm7w\" (UniqueName: \"kubernetes.io/projected/c96c1983-e09a-4afe-93ee-16ad11df114a-kube-api-access-bqm7w\") on node \"crc\" DevicePath \"\"" Oct 11 08:58:40 crc kubenswrapper[5055]: I1011 08:58:40.668022 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c96c1983-e09a-4afe-93ee-16ad11df114a-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.003879 5055 generic.go:334] "Generic (PLEG): container finished" podID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerID="7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758" exitCode=0 Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.003966 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerDied","Data":"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758"} Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.004024 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q5tk2" event={"ID":"c96c1983-e09a-4afe-93ee-16ad11df114a","Type":"ContainerDied","Data":"e2cff51593048d3dda592a2ab5880af8eb53e85704e22bcf5b3a51653d7f6b6a"} Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.004031 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q5tk2" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.004051 5055 scope.go:117] "RemoveContainer" containerID="7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.024307 5055 scope.go:117] "RemoveContainer" containerID="6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.061313 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.062082 5055 scope.go:117] "RemoveContainer" containerID="7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.069651 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-q5tk2"] Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.087627 5055 scope.go:117] "RemoveContainer" containerID="7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758" Oct 11 08:58:41 crc kubenswrapper[5055]: E1011 08:58:41.088100 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758\": container with ID starting with 7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758 not found: ID does not exist" containerID="7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.088159 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758"} err="failed to get container status \"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758\": rpc error: code = NotFound desc = could not find container \"7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758\": container with ID starting with 7c2f93e372e97e15969389ccfb5b82c58ee22379fe88753e518a5dd406497758 not found: ID does not exist" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.088195 5055 scope.go:117] "RemoveContainer" containerID="6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7" Oct 11 08:58:41 crc kubenswrapper[5055]: E1011 08:58:41.088593 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7\": container with ID starting with 6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7 not found: ID does not exist" containerID="6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.088619 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7"} err="failed to get container status \"6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7\": rpc error: code = NotFound desc = could not find container \"6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7\": container with ID starting with 6e279e9bb1cb1d74efe7f1d43ab1197f92cd143af81af89312612843fdccb0f7 not found: ID does not exist" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.088637 5055 scope.go:117] "RemoveContainer" 
containerID="7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9" Oct 11 08:58:41 crc kubenswrapper[5055]: E1011 08:58:41.088927 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9\": container with ID starting with 7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9 not found: ID does not exist" containerID="7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9" Oct 11 08:58:41 crc kubenswrapper[5055]: I1011 08:58:41.088959 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9"} err="failed to get container status \"7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9\": rpc error: code = NotFound desc = could not find container \"7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9\": container with ID starting with 7285ed80ed4e62b17bbfc5dea11df1f6b7f207685f10f52b680e75686d00e2a9 not found: ID does not exist" Oct 11 08:58:43 crc kubenswrapper[5055]: I1011 08:58:43.001515 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" path="/var/lib/kubelet/pods/c96c1983-e09a-4afe-93ee-16ad11df114a/volumes" Oct 11 08:59:02 crc kubenswrapper[5055]: I1011 08:59:02.422009 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 08:59:02 crc kubenswrapper[5055]: I1011 08:59:02.422559 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 08:59:02 crc kubenswrapper[5055]: I1011 08:59:02.422602 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 08:59:02 crc kubenswrapper[5055]: I1011 08:59:02.423136 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 08:59:02 crc kubenswrapper[5055]: I1011 08:59:02.423187 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de" gracePeriod=600 Oct 11 08:59:03 crc kubenswrapper[5055]: I1011 08:59:03.195103 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de" exitCode=0 Oct 11 08:59:03 crc kubenswrapper[5055]: I1011 08:59:03.195947 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de"} Oct 11 08:59:03 crc kubenswrapper[5055]: I1011 08:59:03.196001 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90"} Oct 11 08:59:03 crc kubenswrapper[5055]: I1011 08:59:03.196034 5055 scope.go:117] "RemoveContainer" containerID="40528c9af78f7aee3b417449809c794757170b8c3d80a00c765f79f6b6d11c0d" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.139023 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm"] Oct 11 09:00:00 crc kubenswrapper[5055]: E1011 09:00:00.141590 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="extract-utilities" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.141685 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="extract-utilities" Oct 11 09:00:00 crc kubenswrapper[5055]: E1011 09:00:00.141759 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="extract-content" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.141838 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="extract-content" Oct 11 09:00:00 crc kubenswrapper[5055]: E1011 09:00:00.141897 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="registry-server" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.141949 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="registry-server" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.142246 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="c96c1983-e09a-4afe-93ee-16ad11df114a" containerName="registry-server" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.142866 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.145323 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.145746 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.151999 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm"] Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.268900 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9xhd\" (UniqueName: \"kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.269007 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.269043 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.369881 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.369946 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.369978 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9xhd\" (UniqueName: \"kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.370858 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume\") pod 
\"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.377331 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.390665 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9xhd\" (UniqueName: \"kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd\") pod \"collect-profiles-29336220-tdqsm\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.470184 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:00 crc kubenswrapper[5055]: I1011 09:00:00.870353 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm"] Oct 11 09:00:01 crc kubenswrapper[5055]: I1011 09:00:01.718738 5055 generic.go:334] "Generic (PLEG): container finished" podID="ebdf6aa8-a969-4cff-8d3f-43496bb3a199" containerID="c44ecaf0aaac5689eb34603f8a93114daa930c650212c02e754ec9386bb34f45" exitCode=0 Oct 11 09:00:01 crc kubenswrapper[5055]: I1011 09:00:01.718839 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" event={"ID":"ebdf6aa8-a969-4cff-8d3f-43496bb3a199","Type":"ContainerDied","Data":"c44ecaf0aaac5689eb34603f8a93114daa930c650212c02e754ec9386bb34f45"} Oct 11 09:00:01 crc kubenswrapper[5055]: I1011 09:00:01.719053 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" event={"ID":"ebdf6aa8-a969-4cff-8d3f-43496bb3a199","Type":"ContainerStarted","Data":"93b8070457afea0de14fe8aaaf5c3d47d19c52f6e11f289ee12acc2d830b4533"} Oct 11 09:00:02 crc kubenswrapper[5055]: I1011 09:00:02.999091 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.106274 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume\") pod \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.106316 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume\") pod \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.106370 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9xhd\" (UniqueName: \"kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd\") pod \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\" (UID: \"ebdf6aa8-a969-4cff-8d3f-43496bb3a199\") " Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.107383 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume" (OuterVolumeSpecName: "config-volume") pod "ebdf6aa8-a969-4cff-8d3f-43496bb3a199" (UID: "ebdf6aa8-a969-4cff-8d3f-43496bb3a199"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.111458 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ebdf6aa8-a969-4cff-8d3f-43496bb3a199" (UID: "ebdf6aa8-a969-4cff-8d3f-43496bb3a199"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.111526 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd" (OuterVolumeSpecName: "kube-api-access-w9xhd") pod "ebdf6aa8-a969-4cff-8d3f-43496bb3a199" (UID: "ebdf6aa8-a969-4cff-8d3f-43496bb3a199"). InnerVolumeSpecName "kube-api-access-w9xhd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.207974 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9xhd\" (UniqueName: \"kubernetes.io/projected/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-kube-api-access-w9xhd\") on node \"crc\" DevicePath \"\"" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.208019 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.208030 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ebdf6aa8-a969-4cff-8d3f-43496bb3a199-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.737463 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" event={"ID":"ebdf6aa8-a969-4cff-8d3f-43496bb3a199","Type":"ContainerDied","Data":"93b8070457afea0de14fe8aaaf5c3d47d19c52f6e11f289ee12acc2d830b4533"} Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.737507 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93b8070457afea0de14fe8aaaf5c3d47d19c52f6e11f289ee12acc2d830b4533" Oct 11 09:00:03 crc kubenswrapper[5055]: I1011 09:00:03.737563 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336220-tdqsm" Oct 11 09:00:04 crc kubenswrapper[5055]: I1011 09:00:04.058463 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"] Oct 11 09:00:04 crc kubenswrapper[5055]: I1011 09:00:04.063032 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336175-lsn48"] Oct 11 09:00:05 crc kubenswrapper[5055]: I1011 09:00:05.001062 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a04b6dd-fd0e-43f6-a451-d017a7190daf" path="/var/lib/kubelet/pods/4a04b6dd-fd0e-43f6-a451-d017a7190daf/volumes" Oct 11 09:00:55 crc kubenswrapper[5055]: I1011 09:00:55.281328 5055 scope.go:117] "RemoveContainer" containerID="c56f262735a094c24b121a1d0a2c3f842942c07b1918ad7593d4c38e8444e161" Oct 11 09:01:02 crc kubenswrapper[5055]: I1011 09:01:02.422722 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:01:02 crc kubenswrapper[5055]: I1011 09:01:02.423361 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:01:32 crc kubenswrapper[5055]: I1011 09:01:32.422721 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Oct 11 09:01:32 crc kubenswrapper[5055]: I1011 09:01:32.423534 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.422184 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.422900 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.422961 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.423547 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.423612 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" gracePeriod=600 Oct 11 09:02:02 crc kubenswrapper[5055]: E1011 09:02:02.544576 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.811794 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" exitCode=0 Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.811838 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90"} Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.811868 5055 scope.go:117] "RemoveContainer" containerID="9beeb86f04bd6ee20382227b37fc46eaa95984219227544a26815ea3886019de" Oct 11 09:02:02 crc kubenswrapper[5055]: I1011 09:02:02.812385 5055 scope.go:117] "RemoveContainer" 
containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:02:02 crc kubenswrapper[5055]: E1011 09:02:02.812603 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:02:14 crc kubenswrapper[5055]: I1011 09:02:14.993971 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:02:14 crc kubenswrapper[5055]: E1011 09:02:14.996185 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:02:28 crc kubenswrapper[5055]: I1011 09:02:28.993618 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:02:28 crc kubenswrapper[5055]: E1011 09:02:28.994412 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:02:41 crc kubenswrapper[5055]: I1011 09:02:41.994001 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:02:41 crc kubenswrapper[5055]: E1011 09:02:41.994860 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:02:54 crc kubenswrapper[5055]: I1011 09:02:54.994628 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:02:54 crc kubenswrapper[5055]: E1011 09:02:54.995821 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:03:08 crc kubenswrapper[5055]: I1011 09:03:08.993498 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:03:08 crc kubenswrapper[5055]: E1011 09:03:08.994242 5055 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:03:21 crc kubenswrapper[5055]: I1011 09:03:21.993536 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:03:21 crc kubenswrapper[5055]: E1011 09:03:21.994206 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:03:34 crc kubenswrapper[5055]: I1011 09:03:34.993615 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:03:34 crc kubenswrapper[5055]: E1011 09:03:34.994470 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:03:45 crc kubenswrapper[5055]: I1011 09:03:45.993385 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:03:45 crc kubenswrapper[5055]: E1011 09:03:45.995611 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:03:57 crc kubenswrapper[5055]: I1011 09:03:57.993555 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:03:57 crc kubenswrapper[5055]: E1011 09:03:57.994120 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:04:11 crc kubenswrapper[5055]: I1011 09:04:11.994011 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:04:11 crc kubenswrapper[5055]: E1011 09:04:11.994645 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:04:24 crc kubenswrapper[5055]: I1011 09:04:24.993314 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:04:24 crc kubenswrapper[5055]: E1011 09:04:24.993975 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:04:36 crc kubenswrapper[5055]: I1011 09:04:36.998791 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:04:37 crc kubenswrapper[5055]: E1011 09:04:36.999529 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:04:47 crc kubenswrapper[5055]: I1011 09:04:47.993390 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:04:47 crc kubenswrapper[5055]: E1011 09:04:47.994266 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:05:02 crc kubenswrapper[5055]: I1011 09:05:02.994040 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:05:02 crc kubenswrapper[5055]: E1011 09:05:02.994993 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:05:15 crc kubenswrapper[5055]: I1011 09:05:15.994145 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:05:15 crc kubenswrapper[5055]: E1011 09:05:15.994990 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:05:29 crc kubenswrapper[5055]: I1011 09:05:29.994124 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:05:29 crc kubenswrapper[5055]: E1011 09:05:29.995066 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.206930 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:34 crc kubenswrapper[5055]: E1011 09:05:34.207995 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebdf6aa8-a969-4cff-8d3f-43496bb3a199" containerName="collect-profiles" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.208046 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebdf6aa8-a969-4cff-8d3f-43496bb3a199" containerName="collect-profiles" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.208392 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebdf6aa8-a969-4cff-8d3f-43496bb3a199" containerName="collect-profiles" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.210755 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.231895 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.317321 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fx8b\" (UniqueName: \"kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.317390 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.317434 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.418870 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.418955 
5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.419012 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fx8b\" (UniqueName: \"kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.419446 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.419512 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.437899 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fx8b\" (UniqueName: \"kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b\") pod \"community-operators-vml7c\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:34 crc kubenswrapper[5055]: I1011 09:05:34.556682 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:35 crc kubenswrapper[5055]: I1011 09:05:35.016435 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:35 crc kubenswrapper[5055]: I1011 09:05:35.640598 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerDied","Data":"06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda"} Oct 11 09:05:35 crc kubenswrapper[5055]: I1011 09:05:35.640511 5055 generic.go:334] "Generic (PLEG): container finished" podID="d73085c8-514c-4a07-bab5-80b907da4d75" containerID="06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda" exitCode=0 Oct 11 09:05:35 crc kubenswrapper[5055]: I1011 09:05:35.641324 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerStarted","Data":"fec1bd02492b5ad5858e3f594d77e005293e6c665e1d24af2be5c5627faed1bf"} Oct 11 09:05:35 crc kubenswrapper[5055]: I1011 09:05:35.645054 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 09:05:36 crc kubenswrapper[5055]: I1011 09:05:36.653304 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerStarted","Data":"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e"} Oct 11 09:05:37 crc kubenswrapper[5055]: I1011 09:05:37.665010 5055 generic.go:334] "Generic (PLEG): container finished" podID="d73085c8-514c-4a07-bab5-80b907da4d75" containerID="6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e" exitCode=0 Oct 11 09:05:37 crc kubenswrapper[5055]: I1011 09:05:37.665113 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerDied","Data":"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e"} Oct 11 09:05:38 crc kubenswrapper[5055]: I1011 09:05:38.676676 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerStarted","Data":"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21"} Oct 11 09:05:38 crc kubenswrapper[5055]: I1011 09:05:38.707823 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vml7c" podStartSLOduration=2.240756531 podStartE2EDuration="4.707795241s" podCreationTimestamp="2025-10-11 09:05:34 +0000 UTC" firstStartedPulling="2025-10-11 09:05:35.644028005 +0000 UTC m=+7919.418301852" lastFinishedPulling="2025-10-11 09:05:38.111066735 +0000 UTC m=+7921.885340562" observedRunningTime="2025-10-11 09:05:38.701016809 +0000 UTC m=+7922.475290626" watchObservedRunningTime="2025-10-11 09:05:38.707795241 +0000 UTC m=+7922.482069068" Oct 11 09:05:41 crc kubenswrapper[5055]: I1011 09:05:41.993295 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:05:41 crc kubenswrapper[5055]: E1011 09:05:41.993906 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:05:44 crc kubenswrapper[5055]: I1011 09:05:44.557528 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:44 crc kubenswrapper[5055]: I1011 09:05:44.558026 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:44 crc kubenswrapper[5055]: I1011 09:05:44.626343 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:44 crc kubenswrapper[5055]: I1011 09:05:44.775680 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:44 crc kubenswrapper[5055]: I1011 09:05:44.865075 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:46 crc kubenswrapper[5055]: I1011 09:05:46.745402 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vml7c" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="registry-server" containerID="cri-o://7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21" gracePeriod=2 Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.624003 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.709612 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fx8b\" (UniqueName: \"kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b\") pod \"d73085c8-514c-4a07-bab5-80b907da4d75\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.710206 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities\") pod \"d73085c8-514c-4a07-bab5-80b907da4d75\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.710543 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content\") pod \"d73085c8-514c-4a07-bab5-80b907da4d75\" (UID: \"d73085c8-514c-4a07-bab5-80b907da4d75\") " Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.711231 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities" (OuterVolumeSpecName: "utilities") pod "d73085c8-514c-4a07-bab5-80b907da4d75" (UID: "d73085c8-514c-4a07-bab5-80b907da4d75"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.714657 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b" (OuterVolumeSpecName: "kube-api-access-2fx8b") pod "d73085c8-514c-4a07-bab5-80b907da4d75" (UID: "d73085c8-514c-4a07-bab5-80b907da4d75"). InnerVolumeSpecName "kube-api-access-2fx8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.755212 5055 generic.go:334] "Generic (PLEG): container finished" podID="d73085c8-514c-4a07-bab5-80b907da4d75" containerID="7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21" exitCode=0 Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.755271 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vml7c" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.755286 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerDied","Data":"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21"} Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.755996 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vml7c" event={"ID":"d73085c8-514c-4a07-bab5-80b907da4d75","Type":"ContainerDied","Data":"fec1bd02492b5ad5858e3f594d77e005293e6c665e1d24af2be5c5627faed1bf"} Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.756020 5055 scope.go:117] "RemoveContainer" containerID="7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.764817 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d73085c8-514c-4a07-bab5-80b907da4d75" (UID: "d73085c8-514c-4a07-bab5-80b907da4d75"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.787199 5055 scope.go:117] "RemoveContainer" containerID="6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.810679 5055 scope.go:117] "RemoveContainer" containerID="06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.811981 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.812008 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fx8b\" (UniqueName: \"kubernetes.io/projected/d73085c8-514c-4a07-bab5-80b907da4d75-kube-api-access-2fx8b\") on node \"crc\" DevicePath \"\"" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.812024 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d73085c8-514c-4a07-bab5-80b907da4d75-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.831631 5055 scope.go:117] "RemoveContainer" containerID="7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21" Oct 11 09:05:47 crc kubenswrapper[5055]: E1011 09:05:47.832170 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21\": container with ID starting with 7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21 not found: ID does not exist" containerID="7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.832213 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21"} err="failed to get container status \"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21\": rpc error: code = NotFound desc = could not find container \"7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21\": container with ID starting with 7ad55ae2436f978540ef028badada42a5be45734bedaa5108d1a4faa95236e21 not found: ID does not exist" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.832240 5055 scope.go:117] "RemoveContainer" containerID="6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e" Oct 11 09:05:47 crc kubenswrapper[5055]: E1011 09:05:47.832581 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e\": container with ID starting with 6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e not found: ID does not exist" containerID="6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.832603 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e"} err="failed to get container status \"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e\": rpc error: code = NotFound desc = could not find container 
\"6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e\": container with ID starting with 6690e3eeec61fb507a47a64ad1cc7c29308702e83e64774feacc0a2652b5191e not found: ID does not exist" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.832619 5055 scope.go:117] "RemoveContainer" containerID="06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda" Oct 11 09:05:47 crc kubenswrapper[5055]: E1011 09:05:47.832903 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda\": container with ID starting with 06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda not found: ID does not exist" containerID="06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda" Oct 11 09:05:47 crc kubenswrapper[5055]: I1011 09:05:47.832941 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda"} err="failed to get container status \"06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda\": rpc error: code = NotFound desc = could not find container \"06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda\": container with ID starting with 06cbfe4801037449b6d9e90123f9b7addc8c41aee4efa43923dc922ac1fb4bda not found: ID does not exist" Oct 11 09:05:48 crc kubenswrapper[5055]: I1011 09:05:48.090435 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:48 crc kubenswrapper[5055]: I1011 09:05:48.094779 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vml7c"] Oct 11 09:05:49 crc kubenswrapper[5055]: I1011 09:05:49.005433 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" path="/var/lib/kubelet/pods/d73085c8-514c-4a07-bab5-80b907da4d75/volumes" Oct 11 09:05:57 crc kubenswrapper[5055]: I1011 09:05:57.000129 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:05:57 crc kubenswrapper[5055]: E1011 09:05:57.001005 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:06:10 crc kubenswrapper[5055]: I1011 09:06:10.993236 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:06:10 crc kubenswrapper[5055]: E1011 09:06:10.994102 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:06:23 crc kubenswrapper[5055]: I1011 09:06:23.993138 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" 
Oct 11 09:06:23 crc kubenswrapper[5055]: E1011 09:06:23.995630 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.614635 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:31 crc kubenswrapper[5055]: E1011 09:06:31.616716 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="extract-content" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.616842 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="extract-content" Oct 11 09:06:31 crc kubenswrapper[5055]: E1011 09:06:31.616917 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="extract-utilities" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.616974 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="extract-utilities" Oct 11 09:06:31 crc kubenswrapper[5055]: E1011 09:06:31.617034 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="registry-server" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.617091 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="registry-server" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.617317 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d73085c8-514c-4a07-bab5-80b907da4d75" containerName="registry-server" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.618463 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.630199 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.674442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d6sd\" (UniqueName: \"kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.674748 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.674885 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.776382 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d6sd\" (UniqueName: \"kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.776470 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.776517 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.777363 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.777544 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.811202 5055 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2d6sd\" (UniqueName: \"kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd\") pod \"redhat-marketplace-h26qq\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:31 crc kubenswrapper[5055]: I1011 09:06:31.950274 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:32 crc kubenswrapper[5055]: I1011 09:06:32.402145 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:32 crc kubenswrapper[5055]: W1011 09:06:32.422017 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4cd1eff_9b70_45ba_83a9_63b16883b64f.slice/crio-92be10177aae0380c1d07fe3d68bb065adc3dcb5bcf3565f8e0525f11b99ccf0 WatchSource:0}: Error finding container 92be10177aae0380c1d07fe3d68bb065adc3dcb5bcf3565f8e0525f11b99ccf0: Status 404 returned error can't find the container with id 92be10177aae0380c1d07fe3d68bb065adc3dcb5bcf3565f8e0525f11b99ccf0 Oct 11 09:06:33 crc kubenswrapper[5055]: I1011 09:06:33.126076 5055 generic.go:334] "Generic (PLEG): container finished" podID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerID="80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80" exitCode=0 Oct 11 09:06:33 crc kubenswrapper[5055]: I1011 09:06:33.126214 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerDied","Data":"80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80"} Oct 11 09:06:33 crc kubenswrapper[5055]: I1011 09:06:33.126623 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerStarted","Data":"92be10177aae0380c1d07fe3d68bb065adc3dcb5bcf3565f8e0525f11b99ccf0"} Oct 11 09:06:34 crc kubenswrapper[5055]: I1011 09:06:34.136489 5055 generic.go:334] "Generic (PLEG): container finished" podID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerID="658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2" exitCode=0 Oct 11 09:06:34 crc kubenswrapper[5055]: I1011 09:06:34.137089 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerDied","Data":"658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2"} Oct 11 09:06:35 crc kubenswrapper[5055]: I1011 09:06:35.150817 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerStarted","Data":"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc"} Oct 11 09:06:35 crc kubenswrapper[5055]: I1011 09:06:35.993447 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:06:35 crc kubenswrapper[5055]: E1011 09:06:35.994496 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:06:41 crc kubenswrapper[5055]: I1011 09:06:41.950614 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:41 crc kubenswrapper[5055]: I1011 09:06:41.950673 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:42 crc kubenswrapper[5055]: I1011 09:06:42.018816 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:42 crc kubenswrapper[5055]: I1011 09:06:42.036843 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h26qq" podStartSLOduration=9.634699521 podStartE2EDuration="11.036828509s" podCreationTimestamp="2025-10-11 09:06:31 +0000 UTC" firstStartedPulling="2025-10-11 09:06:33.130207735 +0000 UTC m=+7976.904481542" lastFinishedPulling="2025-10-11 09:06:34.532336723 +0000 UTC m=+7978.306610530" observedRunningTime="2025-10-11 09:06:35.178571765 +0000 UTC m=+7978.952845612" watchObservedRunningTime="2025-10-11 09:06:42.036828509 +0000 UTC m=+7985.811102316" Oct 11 09:06:42 crc kubenswrapper[5055]: I1011 09:06:42.310691 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:42 crc kubenswrapper[5055]: I1011 09:06:42.374132 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.243645 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h26qq" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="registry-server" containerID="cri-o://73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc" gracePeriod=2 Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.766180 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.887484 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d6sd\" (UniqueName: \"kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd\") pod \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.887568 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content\") pod \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.887670 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities\") pod \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\" (UID: \"d4cd1eff-9b70-45ba-83a9-63b16883b64f\") " Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.888674 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities" (OuterVolumeSpecName: "utilities") pod "d4cd1eff-9b70-45ba-83a9-63b16883b64f" (UID: "d4cd1eff-9b70-45ba-83a9-63b16883b64f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.893919 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd" (OuterVolumeSpecName: "kube-api-access-2d6sd") pod "d4cd1eff-9b70-45ba-83a9-63b16883b64f" (UID: "d4cd1eff-9b70-45ba-83a9-63b16883b64f"). InnerVolumeSpecName "kube-api-access-2d6sd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.913895 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d4cd1eff-9b70-45ba-83a9-63b16883b64f" (UID: "d4cd1eff-9b70-45ba-83a9-63b16883b64f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.989122 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d6sd\" (UniqueName: \"kubernetes.io/projected/d4cd1eff-9b70-45ba-83a9-63b16883b64f-kube-api-access-2d6sd\") on node \"crc\" DevicePath \"\"" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.989151 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 09:06:44 crc kubenswrapper[5055]: I1011 09:06:44.989160 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4cd1eff-9b70-45ba-83a9-63b16883b64f-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.253919 5055 generic.go:334] "Generic (PLEG): container finished" podID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerID="73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc" exitCode=0 Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.253971 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerDied","Data":"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc"} Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.254005 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h26qq" event={"ID":"d4cd1eff-9b70-45ba-83a9-63b16883b64f","Type":"ContainerDied","Data":"92be10177aae0380c1d07fe3d68bb065adc3dcb5bcf3565f8e0525f11b99ccf0"} Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.254026 5055 scope.go:117] "RemoveContainer" containerID="73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.254124 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h26qq" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.283177 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.288003 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h26qq"] Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.288968 5055 scope.go:117] "RemoveContainer" containerID="658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.315501 5055 scope.go:117] "RemoveContainer" containerID="80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.344589 5055 scope.go:117] "RemoveContainer" containerID="73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc" Oct 11 09:06:45 crc kubenswrapper[5055]: E1011 09:06:45.345446 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc\": container with ID starting with 73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc not found: ID does not exist" containerID="73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.345484 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc"} err="failed to get container status \"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc\": rpc error: code = NotFound desc = could not find container \"73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc\": container with ID starting with 73bce32b6f6e12ba3bf665d04814c75413d3f4daa47c981c12f511e325058dfc not found: ID does not exist" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.345514 5055 scope.go:117] "RemoveContainer" containerID="658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2" Oct 11 09:06:45 crc kubenswrapper[5055]: E1011 09:06:45.345855 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2\": container with ID starting with 658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2 not found: ID does not exist" containerID="658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.345899 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2"} err="failed to get container status \"658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2\": rpc error: code = NotFound desc = could not find container \"658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2\": container with ID starting with 658fa42c167242dc9efe4185d95931d5fdb39a7c0977ab28c9539053de88d7e2 not found: ID does not exist" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.345932 5055 scope.go:117] "RemoveContainer" containerID="80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80" Oct 11 09:06:45 crc kubenswrapper[5055]: E1011 09:06:45.346533 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80\": container with ID starting with 80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80 not found: ID does not exist" containerID="80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80" Oct 11 09:06:45 crc kubenswrapper[5055]: I1011 09:06:45.346561 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80"} err="failed to get container status \"80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80\": rpc error: code = NotFound desc = could not find container \"80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80\": container with ID starting with 80077f8e16b457947e31dd9b706a6319e30c1a03e958ad879d32aedccc9b3d80 not found: ID does not exist" Oct 11 09:06:47 crc kubenswrapper[5055]: I1011 09:06:47.004072 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" path="/var/lib/kubelet/pods/d4cd1eff-9b70-45ba-83a9-63b16883b64f/volumes" Oct 11 09:06:49 crc kubenswrapper[5055]: I1011 09:06:49.994015 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:06:49 crc kubenswrapper[5055]: E1011 09:06:49.994752 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:07:01 crc kubenswrapper[5055]: I1011 09:07:01.994299 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:07:01 crc kubenswrapper[5055]: E1011 09:07:01.995442 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:07:14 crc kubenswrapper[5055]: I1011 09:07:14.993986 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:07:15 crc kubenswrapper[5055]: I1011 09:07:15.550204 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe"} Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.216267 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:27 crc kubenswrapper[5055]: E1011 09:08:27.217836 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="registry-server" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.217876 5055 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="registry-server" Oct 11 09:08:27 crc kubenswrapper[5055]: E1011 09:08:27.217905 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="extract-content" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.217926 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="extract-content" Oct 11 09:08:27 crc kubenswrapper[5055]: E1011 09:08:27.217982 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="extract-utilities" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.218004 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="extract-utilities" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.218401 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4cd1eff-9b70-45ba-83a9-63b16883b64f" containerName="registry-server" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.221069 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.240983 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.337566 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncxlf\" (UniqueName: \"kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.337660 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.337703 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.438703 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncxlf\" (UniqueName: \"kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.439041 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 
09:08:27.439132 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.439606 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.439745 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.459230 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncxlf\" (UniqueName: \"kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf\") pod \"certified-operators-lrj99\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.549830 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:27 crc kubenswrapper[5055]: I1011 09:08:27.982331 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:28 crc kubenswrapper[5055]: I1011 09:08:28.231327 5055 generic.go:334] "Generic (PLEG): container finished" podID="11d7f507-0f50-4117-b61a-9684a0661af3" containerID="160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f" exitCode=0 Oct 11 09:08:28 crc kubenswrapper[5055]: I1011 09:08:28.231383 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerDied","Data":"160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f"} Oct 11 09:08:28 crc kubenswrapper[5055]: I1011 09:08:28.231429 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerStarted","Data":"f038a3a2accaa27176e2ee952df7d862d93a7832405d0c8e51a2ecfc27f395c8"} Oct 11 09:08:29 crc kubenswrapper[5055]: I1011 09:08:29.240597 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerStarted","Data":"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7"} Oct 11 09:08:30 crc kubenswrapper[5055]: I1011 09:08:30.255732 5055 generic.go:334] "Generic (PLEG): container finished" podID="11d7f507-0f50-4117-b61a-9684a0661af3" containerID="d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7" exitCode=0 Oct 11 09:08:30 crc kubenswrapper[5055]: I1011 09:08:30.255865 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" 
event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerDied","Data":"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7"} Oct 11 09:08:31 crc kubenswrapper[5055]: I1011 09:08:31.264745 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerStarted","Data":"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd"} Oct 11 09:08:31 crc kubenswrapper[5055]: I1011 09:08:31.282358 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lrj99" podStartSLOduration=1.810600642 podStartE2EDuration="4.282343817s" podCreationTimestamp="2025-10-11 09:08:27 +0000 UTC" firstStartedPulling="2025-10-11 09:08:28.233433442 +0000 UTC m=+8092.007707249" lastFinishedPulling="2025-10-11 09:08:30.705176607 +0000 UTC m=+8094.479450424" observedRunningTime="2025-10-11 09:08:31.277993084 +0000 UTC m=+8095.052266911" watchObservedRunningTime="2025-10-11 09:08:31.282343817 +0000 UTC m=+8095.056617624" Oct 11 09:08:37 crc kubenswrapper[5055]: I1011 09:08:37.550393 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:37 crc kubenswrapper[5055]: I1011 09:08:37.550972 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:37 crc kubenswrapper[5055]: I1011 09:08:37.598309 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:38 crc kubenswrapper[5055]: I1011 09:08:38.367723 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:38 crc kubenswrapper[5055]: I1011 09:08:38.412107 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.336319 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lrj99" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="registry-server" containerID="cri-o://e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd" gracePeriod=2 Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.729157 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.846157 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content\") pod \"11d7f507-0f50-4117-b61a-9684a0661af3\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.846198 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncxlf\" (UniqueName: \"kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf\") pod \"11d7f507-0f50-4117-b61a-9684a0661af3\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.846235 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities\") pod \"11d7f507-0f50-4117-b61a-9684a0661af3\" (UID: \"11d7f507-0f50-4117-b61a-9684a0661af3\") " Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.847365 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities" (OuterVolumeSpecName: "utilities") pod "11d7f507-0f50-4117-b61a-9684a0661af3" (UID: "11d7f507-0f50-4117-b61a-9684a0661af3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.851668 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf" (OuterVolumeSpecName: "kube-api-access-ncxlf") pod "11d7f507-0f50-4117-b61a-9684a0661af3" (UID: "11d7f507-0f50-4117-b61a-9684a0661af3"). InnerVolumeSpecName "kube-api-access-ncxlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.891097 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "11d7f507-0f50-4117-b61a-9684a0661af3" (UID: "11d7f507-0f50-4117-b61a-9684a0661af3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.947643 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.947693 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncxlf\" (UniqueName: \"kubernetes.io/projected/11d7f507-0f50-4117-b61a-9684a0661af3-kube-api-access-ncxlf\") on node \"crc\" DevicePath \"\"" Oct 11 09:08:40 crc kubenswrapper[5055]: I1011 09:08:40.947703 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11d7f507-0f50-4117-b61a-9684a0661af3-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.344883 5055 generic.go:334] "Generic (PLEG): container finished" podID="11d7f507-0f50-4117-b61a-9684a0661af3" containerID="e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd" exitCode=0 Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.344930 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerDied","Data":"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd"} Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.344944 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lrj99" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.344967 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrj99" event={"ID":"11d7f507-0f50-4117-b61a-9684a0661af3","Type":"ContainerDied","Data":"f038a3a2accaa27176e2ee952df7d862d93a7832405d0c8e51a2ecfc27f395c8"} Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.344991 5055 scope.go:117] "RemoveContainer" containerID="e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.365795 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.372959 5055 scope.go:117] "RemoveContainer" containerID="d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.373182 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lrj99"] Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.408661 5055 scope.go:117] "RemoveContainer" containerID="160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.435629 5055 scope.go:117] "RemoveContainer" containerID="e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd" Oct 11 09:08:41 crc kubenswrapper[5055]: E1011 09:08:41.436402 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd\": container with ID starting with e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd not found: ID does not exist" containerID="e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.436461 
5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd"} err="failed to get container status \"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd\": rpc error: code = NotFound desc = could not find container \"e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd\": container with ID starting with e403d7545e5535d87cca87ad3622324f1092d25e1cfdb71e0b70d1f721d5dccd not found: ID does not exist" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.436487 5055 scope.go:117] "RemoveContainer" containerID="d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7" Oct 11 09:08:41 crc kubenswrapper[5055]: E1011 09:08:41.436972 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7\": container with ID starting with d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7 not found: ID does not exist" containerID="d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.437019 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7"} err="failed to get container status \"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7\": rpc error: code = NotFound desc = could not find container \"d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7\": container with ID starting with d9b47dd83c5b1d54aec862ff22780f2349ecba786535b4b5da485df617158fc7 not found: ID does not exist" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.437036 5055 scope.go:117] "RemoveContainer" containerID="160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f" Oct 11 09:08:41 crc kubenswrapper[5055]: E1011 09:08:41.437459 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f\": container with ID starting with 160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f not found: ID does not exist" containerID="160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f" Oct 11 09:08:41 crc kubenswrapper[5055]: I1011 09:08:41.437503 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f"} err="failed to get container status \"160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f\": rpc error: code = NotFound desc = could not find container \"160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f\": container with ID starting with 160b29b91a97351636fa4e520b91531537c13733e031a339ec2193e8f42e880f not found: ID does not exist" Oct 11 09:08:43 crc kubenswrapper[5055]: I1011 09:08:43.026215 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" path="/var/lib/kubelet/pods/11d7f507-0f50-4117-b61a-9684a0661af3/volumes" Oct 11 09:09:32 crc kubenswrapper[5055]: I1011 09:09:32.422169 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:09:32 crc kubenswrapper[5055]: I1011 09:09:32.422693 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:10:02 crc kubenswrapper[5055]: I1011 09:10:02.422113 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:10:02 crc kubenswrapper[5055]: I1011 09:10:02.422812 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:10:32 crc kubenswrapper[5055]: I1011 09:10:32.421966 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:10:32 crc kubenswrapper[5055]: I1011 09:10:32.422398 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:10:32 crc kubenswrapper[5055]: I1011 09:10:32.422506 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 09:10:32 crc kubenswrapper[5055]: I1011 09:10:32.423077 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 09:10:32 crc kubenswrapper[5055]: I1011 09:10:32.423129 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe" gracePeriod=600 Oct 11 09:10:33 crc kubenswrapper[5055]: I1011 09:10:33.370559 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe" exitCode=0 Oct 11 09:10:33 crc kubenswrapper[5055]: I1011 09:10:33.370659 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" 
event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe"} Oct 11 09:10:33 crc kubenswrapper[5055]: I1011 09:10:33.370993 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerStarted","Data":"3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db"} Oct 11 09:10:33 crc kubenswrapper[5055]: I1011 09:10:33.371016 5055 scope.go:117] "RemoveContainer" containerID="4fce0b7d2d8d8a3d8b76aef8b77da8ffc9314a7260b41d10a39b925326b32f90" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.640124 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7hvb8/must-gather-sbbg6"] Oct 11 09:12:12 crc kubenswrapper[5055]: E1011 09:12:12.640878 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="extract-utilities" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.640890 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="extract-utilities" Oct 11 09:12:12 crc kubenswrapper[5055]: E1011 09:12:12.640912 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="registry-server" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.640917 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="registry-server" Oct 11 09:12:12 crc kubenswrapper[5055]: E1011 09:12:12.640943 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="extract-content" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.640948 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="extract-content" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.641088 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="11d7f507-0f50-4117-b61a-9684a0661af3" containerName="registry-server" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.641797 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.643239 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7hvb8"/"openshift-service-ca.crt" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.643647 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7hvb8"/"kube-root-ca.crt" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.645090 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-7hvb8"/"default-dockercfg-tbvj5" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.650227 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7hvb8/must-gather-sbbg6"] Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.778476 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.778799 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x97r2\" (UniqueName: \"kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.880577 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x97r2\" (UniqueName: \"kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.880698 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.881339 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.906599 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x97r2\" (UniqueName: \"kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2\") pod \"must-gather-sbbg6\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:12 crc kubenswrapper[5055]: I1011 09:12:12.959864 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:12:13 crc kubenswrapper[5055]: I1011 09:12:13.490525 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7hvb8/must-gather-sbbg6"] Oct 11 09:12:13 crc kubenswrapper[5055]: W1011 09:12:13.505897 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod280caae4_c1b8_498c_949e_5d852e525739.slice/crio-3d21c1551f0eaf9be1fade452f9be2c41bec0b4f447585249e044a725c4517c3 WatchSource:0}: Error finding container 3d21c1551f0eaf9be1fade452f9be2c41bec0b4f447585249e044a725c4517c3: Status 404 returned error can't find the container with id 3d21c1551f0eaf9be1fade452f9be2c41bec0b4f447585249e044a725c4517c3 Oct 11 09:12:13 crc kubenswrapper[5055]: I1011 09:12:13.507584 5055 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 11 09:12:14 crc kubenswrapper[5055]: I1011 09:12:14.260411 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" event={"ID":"280caae4-c1b8-498c-949e-5d852e525739","Type":"ContainerStarted","Data":"3d21c1551f0eaf9be1fade452f9be2c41bec0b4f447585249e044a725c4517c3"} Oct 11 09:12:17 crc kubenswrapper[5055]: I1011 09:12:17.286453 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" event={"ID":"280caae4-c1b8-498c-949e-5d852e525739","Type":"ContainerStarted","Data":"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb"} Oct 11 09:12:18 crc kubenswrapper[5055]: I1011 09:12:18.298031 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" event={"ID":"280caae4-c1b8-498c-949e-5d852e525739","Type":"ContainerStarted","Data":"2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846"} Oct 11 09:12:18 crc kubenswrapper[5055]: I1011 09:12:18.333152 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" podStartSLOduration=2.904024284 podStartE2EDuration="6.333134137s" podCreationTimestamp="2025-10-11 09:12:12 +0000 UTC" firstStartedPulling="2025-10-11 09:12:13.50732126 +0000 UTC m=+8317.281595067" lastFinishedPulling="2025-10-11 09:12:16.936431113 +0000 UTC m=+8320.710704920" observedRunningTime="2025-10-11 09:12:18.329511764 +0000 UTC m=+8322.103785561" watchObservedRunningTime="2025-10-11 09:12:18.333134137 +0000 UTC m=+8322.107407934" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.416805 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.418903 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.426455 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.455342 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.455552 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhx4p\" (UniqueName: \"kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.455605 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.557372 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.557434 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhx4p\" (UniqueName: \"kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.557466 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.558004 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.558050 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.577442 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vhx4p\" (UniqueName: \"kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p\") pod \"redhat-operators-c7nds\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:24 crc kubenswrapper[5055]: I1011 09:12:24.747966 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:25 crc kubenswrapper[5055]: I1011 09:12:25.124849 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:25 crc kubenswrapper[5055]: W1011 09:12:25.132393 5055 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0d5dd75_02a5_412b_ab59_af5fb6f7873b.slice/crio-3d100c7b4ee9d0725c67bb322b6cd57056786d35edfa934a4695b8e2475e67f3 WatchSource:0}: Error finding container 3d100c7b4ee9d0725c67bb322b6cd57056786d35edfa934a4695b8e2475e67f3: Status 404 returned error can't find the container with id 3d100c7b4ee9d0725c67bb322b6cd57056786d35edfa934a4695b8e2475e67f3 Oct 11 09:12:25 crc kubenswrapper[5055]: I1011 09:12:25.345949 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerID="148df11ef66f54f20bd02e39c8ac899f1de585697579b573f525cebbf188d4e6" exitCode=0 Oct 11 09:12:25 crc kubenswrapper[5055]: I1011 09:12:25.346052 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerDied","Data":"148df11ef66f54f20bd02e39c8ac899f1de585697579b573f525cebbf188d4e6"} Oct 11 09:12:25 crc kubenswrapper[5055]: I1011 09:12:25.346321 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerStarted","Data":"3d100c7b4ee9d0725c67bb322b6cd57056786d35edfa934a4695b8e2475e67f3"} Oct 11 09:12:27 crc kubenswrapper[5055]: I1011 09:12:27.361529 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerID="cbb5f5833a3cb9c77a4dc58c4bad4c8b0f391c8346de076ac10373c0de90a8e5" exitCode=0 Oct 11 09:12:27 crc kubenswrapper[5055]: I1011 09:12:27.361631 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerDied","Data":"cbb5f5833a3cb9c77a4dc58c4bad4c8b0f391c8346de076ac10373c0de90a8e5"} Oct 11 09:12:30 crc kubenswrapper[5055]: I1011 09:12:30.383832 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerStarted","Data":"f81a880661a126f6950d043c58cc0b2944d2b31f15cc7b2706b2775d8cd7dd83"} Oct 11 09:12:30 crc kubenswrapper[5055]: I1011 09:12:30.406635 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c7nds" podStartSLOduration=2.5103885889999997 podStartE2EDuration="6.406618306s" podCreationTimestamp="2025-10-11 09:12:24 +0000 UTC" firstStartedPulling="2025-10-11 09:12:25.347588727 +0000 UTC m=+8329.121862534" lastFinishedPulling="2025-10-11 09:12:29.243818444 +0000 UTC m=+8333.018092251" observedRunningTime="2025-10-11 09:12:30.400995756 +0000 UTC m=+8334.175269573" watchObservedRunningTime="2025-10-11 
09:12:30.406618306 +0000 UTC m=+8334.180892103" Oct 11 09:12:32 crc kubenswrapper[5055]: I1011 09:12:32.422544 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:12:32 crc kubenswrapper[5055]: I1011 09:12:32.422622 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:12:34 crc kubenswrapper[5055]: I1011 09:12:34.748977 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:34 crc kubenswrapper[5055]: I1011 09:12:34.749059 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:34 crc kubenswrapper[5055]: I1011 09:12:34.794378 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:35 crc kubenswrapper[5055]: I1011 09:12:35.452670 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:35 crc kubenswrapper[5055]: I1011 09:12:35.501721 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:37 crc kubenswrapper[5055]: I1011 09:12:37.429603 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c7nds" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="registry-server" containerID="cri-o://f81a880661a126f6950d043c58cc0b2944d2b31f15cc7b2706b2775d8cd7dd83" gracePeriod=2 Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.454508 5055 generic.go:334] "Generic (PLEG): container finished" podID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerID="f81a880661a126f6950d043c58cc0b2944d2b31f15cc7b2706b2775d8cd7dd83" exitCode=0 Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.454604 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerDied","Data":"f81a880661a126f6950d043c58cc0b2944d2b31f15cc7b2706b2775d8cd7dd83"} Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.785278 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.884234 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content\") pod \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.884520 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhx4p\" (UniqueName: \"kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p\") pod \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.884645 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities\") pod \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\" (UID: \"e0d5dd75-02a5-412b-ab59-af5fb6f7873b\") " Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.885486 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities" (OuterVolumeSpecName: "utilities") pod "e0d5dd75-02a5-412b-ab59-af5fb6f7873b" (UID: "e0d5dd75-02a5-412b-ab59-af5fb6f7873b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.904985 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p" (OuterVolumeSpecName: "kube-api-access-vhx4p") pod "e0d5dd75-02a5-412b-ab59-af5fb6f7873b" (UID: "e0d5dd75-02a5-412b-ab59-af5fb6f7873b"). InnerVolumeSpecName "kube-api-access-vhx4p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.965667 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0d5dd75-02a5-412b-ab59-af5fb6f7873b" (UID: "e0d5dd75-02a5-412b-ab59-af5fb6f7873b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.985875 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.985914 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhx4p\" (UniqueName: \"kubernetes.io/projected/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-kube-api-access-vhx4p\") on node \"crc\" DevicePath \"\"" Oct 11 09:12:39 crc kubenswrapper[5055]: I1011 09:12:39.985924 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0d5dd75-02a5-412b-ab59-af5fb6f7873b-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.463582 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c7nds" event={"ID":"e0d5dd75-02a5-412b-ab59-af5fb6f7873b","Type":"ContainerDied","Data":"3d100c7b4ee9d0725c67bb322b6cd57056786d35edfa934a4695b8e2475e67f3"} Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.463639 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c7nds" Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.464759 5055 scope.go:117] "RemoveContainer" containerID="f81a880661a126f6950d043c58cc0b2944d2b31f15cc7b2706b2775d8cd7dd83" Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.484681 5055 scope.go:117] "RemoveContainer" containerID="cbb5f5833a3cb9c77a4dc58c4bad4c8b0f391c8346de076ac10373c0de90a8e5" Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.503720 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.508062 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c7nds"] Oct 11 09:12:40 crc kubenswrapper[5055]: I1011 09:12:40.527716 5055 scope.go:117] "RemoveContainer" containerID="148df11ef66f54f20bd02e39c8ac899f1de585697579b573f525cebbf188d4e6" Oct 11 09:12:41 crc kubenswrapper[5055]: I1011 09:12:41.006282 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" path="/var/lib/kubelet/pods/e0d5dd75-02a5-412b-ab59-af5fb6f7873b/volumes" Oct 11 09:13:02 crc kubenswrapper[5055]: I1011 09:13:02.422174 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:13:02 crc kubenswrapper[5055]: I1011 09:13:02.422691 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:13:08 crc kubenswrapper[5055]: I1011 09:13:08.635362 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-658bdf4b74-5ngjz_b3d55b74-0882-459a-be9a-2da659337819/kube-rbac-proxy/0.log" Oct 11 09:13:08 crc 
kubenswrapper[5055]: I1011 09:13:08.688199 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-658bdf4b74-5ngjz_b3d55b74-0882-459a-be9a-2da659337819/manager/0.log" Oct 11 09:13:08 crc kubenswrapper[5055]: I1011 09:13:08.786461 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/util/0.log" Oct 11 09:13:08 crc kubenswrapper[5055]: I1011 09:13:08.968715 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/pull/0.log" Oct 11 09:13:08 crc kubenswrapper[5055]: I1011 09:13:08.971272 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/util/0.log" Oct 11 09:13:08 crc kubenswrapper[5055]: I1011 09:13:08.975633 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/pull/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.124438 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/pull/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.144238 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/util/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.162720 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_bbf55ab9b6da9dfde4a224fc1e3f049ee7cb6cab839422fb52a09a365b6ktvl_ca031800-cd06-4d1b-bea6-cbd26e97b325/extract/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.305752 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7b7fb68549-v7jqb_8e18a79a-705e-467e-b7cd-0fc0a90f1a7a/manager/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.346108 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-7b7fb68549-v7jqb_8e18a79a-705e-467e-b7cd-0fc0a90f1a7a/kube-rbac-proxy/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.381157 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-85d5d9dd78-zmrmw_cb2cd76d-3803-48d6-8882-9b895be3c494/kube-rbac-proxy/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.459661 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-85d5d9dd78-zmrmw_cb2cd76d-3803-48d6-8882-9b895be3c494/manager/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.529596 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84b9b84486-bdvrf_ea4aab30-444b-4efe-a76d-bcd6c673fffe/kube-rbac-proxy/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.649388 5055 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84b9b84486-bdvrf_ea4aab30-444b-4efe-a76d-bcd6c673fffe/manager/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.683612 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-858f76bbdd-gxs22_d0538072-f53e-4d2a-858e-667657ffd09e/kube-rbac-proxy/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.743873 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-858f76bbdd-gxs22_d0538072-f53e-4d2a-858e-667657ffd09e/manager/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.842268 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7ffbcb7588-5zhf2_c6fd3cbf-29c5-4820-8936-2203cfa34345/manager/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.846808 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7ffbcb7588-5zhf2_c6fd3cbf-29c5-4820-8936-2203cfa34345/kube-rbac-proxy/0.log" Oct 11 09:13:09 crc kubenswrapper[5055]: I1011 09:13:09.976084 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-656bcbd775-rk8j7_5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.090605 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9c5c78d49-jsbhj_a711f3ae-fd01-49ab-8349-18c12be42a20/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.143269 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-656bcbd775-rk8j7_5d1e2ebf-0163-4b7b-9b8e-fb2e2a834fa5/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.165995 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-9c5c78d49-jsbhj_a711f3ae-fd01-49ab-8349-18c12be42a20/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.280121 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-55b6b7c7b8-nmq2v_d62acace-7cb6-44e0-8b54-761a9cbd9cfe/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.352011 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5f67fbc655-9qdps_fc32b680-c849-4967-9a78-49c724018aa5/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.418588 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-55b6b7c7b8-nmq2v_d62acace-7cb6-44e0-8b54-761a9cbd9cfe/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.455054 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5f67fbc655-9qdps_fc32b680-c849-4967-9a78-49c724018aa5/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.522167 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f9fb45f8f-qhkjb_19a5d205-97f5-41fa-a9df-578a2c60549f/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.572369 5055 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-f9fb45f8f-qhkjb_19a5d205-97f5-41fa-a9df-578a2c60549f/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.730102 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-79d585cb66-p6cwz_8e44a30e-0e4b-4560-b22a-755a8ae3af75/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.795959 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-79d585cb66-p6cwz_8e44a30e-0e4b-4560-b22a-755a8ae3af75/manager/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.830762 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5df598886f-54hjb_2e808412-35a4-431c-8ea1-96e0730a48bb/kube-rbac-proxy/0.log" Oct 11 09:13:10 crc kubenswrapper[5055]: I1011 09:13:10.991432 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-5df598886f-54hjb_2e808412-35a4-431c-8ea1-96e0730a48bb/manager/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.010839 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69fdcfc5f5-spd9v_ae29373a-08c3-40d4-8f74-b92069a970bf/kube-rbac-proxy/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.027492 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-69fdcfc5f5-spd9v_ae29373a-08c3-40d4-8f74-b92069a970bf/manager/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.161464 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn_4b9b0bd4-6cb3-4970-823f-c942d9567b64/kube-rbac-proxy/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.197303 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5956dffb7bdmxcn_4b9b0bd4-6cb3-4970-823f-c942d9567b64/manager/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.317856 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5b95c8954b-kwb47_a027701a-75f2-4640-a841-11dab6a3d4f9/kube-rbac-proxy/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.406434 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-688d597459-f26lm_bf25a169-f625-4758-9a83-4cd9acdcdffe/kube-rbac-proxy/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.659875 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-688d597459-f26lm_bf25a169-f625-4758-9a83-4cd9acdcdffe/operator/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.808331 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-t6qk8_716f5b93-0e21-4e3a-a15f-01f2a6ece1fa/registry-server/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.820145 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-79df5fb58c-lw5s2_d3bd380a-8fb6-4323-a11d-85b5c5123276/kube-rbac-proxy/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 
09:13:11.920714 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-79df5fb58c-lw5s2_d3bd380a-8fb6-4323-a11d-85b5c5123276/manager/0.log" Oct 11 09:13:11 crc kubenswrapper[5055]: I1011 09:13:11.998307 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-68b6c87b68-xwbdv_a7abb018-37e9-4d57-90bf-e6333f7d252c/kube-rbac-proxy/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.083434 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5b95c8954b-kwb47_a027701a-75f2-4640-a841-11dab6a3d4f9/manager/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.127059 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-68b6c87b68-xwbdv_a7abb018-37e9-4d57-90bf-e6333f7d252c/manager/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.192382 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-vpbs9_c06ae024-6af2-4435-b90b-d8c16d62aaf9/operator/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.253879 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-db6d7f97b-lqf4k_83d0866a-b853-4873-bf7e-8000c2f7a63a/kube-rbac-proxy/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.345257 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-db6d7f97b-lqf4k_83d0866a-b853-4873-bf7e-8000c2f7a63a/manager/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.376096 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67cfc6749b-wm2wn_86f57e52-de78-48ca-86ec-69286b53726c/kube-rbac-proxy/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.510231 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-67cfc6749b-wm2wn_86f57e52-de78-48ca-86ec-69286b53726c/manager/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.568651 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5458f77c4-brxz6_587a08b9-a65b-43fc-9d11-80568dc93742/manager/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.595914 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5458f77c4-brxz6_587a08b9-a65b-43fc-9d11-80568dc93742/kube-rbac-proxy/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.650443 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7f554bff7b-lf2lh_8cac90ea-fff7-4955-89bf-984d1c0f3094/kube-rbac-proxy/0.log" Oct 11 09:13:12 crc kubenswrapper[5055]: I1011 09:13:12.681577 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7f554bff7b-lf2lh_8cac90ea-fff7-4955-89bf-984d1c0f3094/manager/0.log" Oct 11 09:13:27 crc kubenswrapper[5055]: I1011 09:13:27.553319 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-5x9qp_2ed68501-b6df-40a5-b58b-669bb8ff37d6/control-plane-machine-set-operator/0.log" Oct 11 
09:13:27 crc kubenswrapper[5055]: I1011 09:13:27.756328 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bdszt_199dbd10-cbbc-4f91-bc11-6a7ea9dc6609/kube-rbac-proxy/0.log" Oct 11 09:13:27 crc kubenswrapper[5055]: I1011 09:13:27.758162 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bdszt_199dbd10-cbbc-4f91-bc11-6a7ea9dc6609/machine-api-operator/0.log" Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.422757 5055 patch_prober.go:28] interesting pod/machine-config-daemon-qtqvf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.423064 5055 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.423108 5055 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.423629 5055 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db"} pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.423677 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" containerName="machine-config-daemon" containerID="cri-o://3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" gracePeriod=600 Oct 11 09:13:32 crc kubenswrapper[5055]: E1011 09:13:32.542585 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.817802 5055 generic.go:334] "Generic (PLEG): container finished" podID="46789346-5a88-43a0-ad63-b530185c8ca1" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" exitCode=0 Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.817851 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" event={"ID":"46789346-5a88-43a0-ad63-b530185c8ca1","Type":"ContainerDied","Data":"3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db"} Oct 11 09:13:32 crc kubenswrapper[5055]: I1011 09:13:32.817883 5055 scope.go:117] "RemoveContainer" containerID="228ae19a9cf8309853615626c4eed37f26ed81d771575842aabd496e0ef6c2fe" Oct 11 09:13:32 
crc kubenswrapper[5055]: I1011 09:13:32.818595 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:13:32 crc kubenswrapper[5055]: E1011 09:13:32.818849 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:13:38 crc kubenswrapper[5055]: I1011 09:13:38.275790 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-7d4cc89fcb-5gv8v_733b591a-c173-4ea1-8f0a-4b48ec3f3a49/cert-manager-controller/0.log" Oct 11 09:13:38 crc kubenswrapper[5055]: I1011 09:13:38.442929 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7d9f95dbf-zvrrq_bc9180a6-db2c-45cc-aafa-285a56562abc/cert-manager-cainjector/0.log" Oct 11 09:13:38 crc kubenswrapper[5055]: I1011 09:13:38.507469 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-d969966f-lqblm_f879cfe7-12ac-431e-a7d3-a3c3bff29401/cert-manager-webhook/0.log" Oct 11 09:13:43 crc kubenswrapper[5055]: I1011 09:13:43.994148 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:13:43 crc kubenswrapper[5055]: E1011 09:13:43.995150 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.305533 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-fp454_b2155c2a-94c5-4dd0-8960-a40eda21c6b2/nmstate-console-plugin/0.log" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.429337 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-n4knm_20c5f869-3113-4877-8052-73033c2200ba/nmstate-handler/0.log" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.501889 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-kprdt_a47eeb98-80a9-4ee6-8175-cf40a0e5a59d/nmstate-metrics/0.log" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.528953 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-kprdt_a47eeb98-80a9-4ee6-8175-cf40a0e5a59d/kube-rbac-proxy/0.log" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.711449 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-xh8hj_47925cdb-bc01-447e-9d57-277acb520901/nmstate-webhook/0.log" Oct 11 09:13:50 crc kubenswrapper[5055]: I1011 09:13:50.724022 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-p9blj_82160f7a-cf39-40a9-904f-4343de88371c/nmstate-operator/0.log" Oct 11 09:13:57 crc kubenswrapper[5055]: I1011 09:13:57.001741 5055 
scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:13:57 crc kubenswrapper[5055]: E1011 09:13:57.002729 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.046746 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-r94c9_89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7/kube-rbac-proxy/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.281223 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-frr-files/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.431271 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-r94c9_89f1b37c-5214-4b4d-b2a6-3543ef7bd2e7/controller/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.435057 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-reloader/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.450434 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-metrics/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.480851 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-frr-files/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.578010 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-reloader/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.771394 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-metrics/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.771471 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-metrics/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.837730 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-frr-files/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.841202 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-reloader/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.953652 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-reloader/0.log" Oct 11 09:14:04 crc kubenswrapper[5055]: I1011 09:14:04.981793 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-frr-files/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.043198 5055 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/controller/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.051562 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/cp-metrics/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.183837 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/frr-metrics/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.209698 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/kube-rbac-proxy/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.279203 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/kube-rbac-proxy-frr/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.401190 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/reloader/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.478953 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-p2ns9_10b6afa5-8fa5-4580-8da4-1ba5ad7adca1/frr-k8s-webhook-server/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.623185 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-8479555c6b-xzdfd_d3f5f61f-8260-48f5-8dd7-7c139cbad658/manager/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.821280 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6bdb447db-9zwvv_efefbb28-ed08-46fe-ac6b-e819db7e3b0b/webhook-server/0.log" Oct 11 09:14:05 crc kubenswrapper[5055]: I1011 09:14:05.870202 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tlblh_40eb46a9-72ab-4076-a44a-e76267ebb92c/kube-rbac-proxy/0.log" Oct 11 09:14:06 crc kubenswrapper[5055]: I1011 09:14:06.397213 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-tlblh_40eb46a9-72ab-4076-a44a-e76267ebb92c/speaker/0.log" Oct 11 09:14:06 crc kubenswrapper[5055]: I1011 09:14:06.721957 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5fz6w_4d7343fd-2840-432d-aa04-4a8330fa4b09/frr/0.log" Oct 11 09:14:08 crc kubenswrapper[5055]: I1011 09:14:08.993861 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:14:08 crc kubenswrapper[5055]: E1011 09:14:08.994463 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:14:17 crc kubenswrapper[5055]: I1011 09:14:17.899830 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.083691 5055 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.088845 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.161379 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.273245 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.276590 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/extract/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.311487 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_695e9552c02c72940c72621f824780f00ca58086c3badc308bf0a2eb69wrfws_a970f2fd-e0e0-4837-88e5-09a8b071309b/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.438123 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.579498 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.581167 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.612710 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.759920 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/util/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.771084 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/pull/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.780646 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d2fbxjq_04210186-75b3-4eaa-9eb6-f4b5c41af23a/extract/0.log" Oct 11 09:14:18 crc kubenswrapper[5055]: I1011 09:14:18.933057 5055 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-utilities/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.067759 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-utilities/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.069805 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.103879 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.259031 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-utilities/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.265144 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.464514 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-utilities/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.651142 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-utilities/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.713225 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.713318 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.908276 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-content/0.log" Oct 11 09:14:19 crc kubenswrapper[5055]: I1011 09:14:19.932310 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/extract-utilities/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.182915 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/util/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.310876 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/util/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.346654 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/pull/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: 
I1011 09:14:20.409354 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-w4m8t_b4326943-ad56-4da8-b6fd-df9d1f5b5cf6/registry-server/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.541286 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/pull/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.957539 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/pull/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.972014 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/extract/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.976756 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6l6qh_b4aa6bf6-796d-44cf-8c57-c2ef36885910/registry-server/0.log" Oct 11 09:14:20 crc kubenswrapper[5055]: I1011 09:14:20.990231 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835cq2h85_5d98516c-9175-49d7-aa13-c9dd8fb2ea57/util/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.123690 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.143735 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9qmg4_7530e962-328a-4b9c-8a07-c2f055845eda/marketplace-operator/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.306065 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.323806 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.337904 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.473271 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.484928 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.544101 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.733493 5055 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-k25qp_1b333b58-deda-4c73-836d-a12c53f3adf2/registry-server/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.751503 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.768208 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.782652 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.923640 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-utilities/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.933286 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/extract-content/0.log" Oct 11 09:14:21 crc kubenswrapper[5055]: I1011 09:14:21.993225 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:14:21 crc kubenswrapper[5055]: E1011 09:14:21.993587 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:14:22 crc kubenswrapper[5055]: I1011 09:14:22.802486 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pqw42_aa53a9b5-e5f4-44e4-b48c-b4f517b959a9/registry-server/0.log" Oct 11 09:14:34 crc kubenswrapper[5055]: I1011 09:14:34.993126 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:14:34 crc kubenswrapper[5055]: E1011 09:14:34.993983 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:14:49 crc kubenswrapper[5055]: I1011 09:14:49.993117 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:14:49 crc kubenswrapper[5055]: E1011 09:14:49.993821 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:15:00 
crc kubenswrapper[5055]: I1011 09:15:00.178848 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq"] Oct 11 09:15:00 crc kubenswrapper[5055]: E1011 09:15:00.179912 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="registry-server" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.179930 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="registry-server" Oct 11 09:15:00 crc kubenswrapper[5055]: E1011 09:15:00.179949 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="extract-utilities" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.179957 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="extract-utilities" Oct 11 09:15:00 crc kubenswrapper[5055]: E1011 09:15:00.179973 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="extract-content" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.179982 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="extract-content" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.180205 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d5dd75-02a5-412b-ab59-af5fb6f7873b" containerName="registry-server" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.180909 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.183832 5055 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.184344 5055 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.192283 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq"] Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.375089 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m67j\" (UniqueName: \"kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.375230 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.375299 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume\") pod 
\"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.476647 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.476740 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m67j\" (UniqueName: \"kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.476827 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.477723 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.499117 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m67j\" (UniqueName: \"kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.500311 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume\") pod \"collect-profiles-29336235-799vq\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.504742 5055 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:00 crc kubenswrapper[5055]: I1011 09:15:00.938002 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq"] Oct 11 09:15:01 crc kubenswrapper[5055]: I1011 09:15:01.475668 5055 generic.go:334] "Generic (PLEG): container finished" podID="5332f844-86cd-48c6-b808-5fdc280bcdb2" containerID="1d695ea2cc327a0677648a3d51aeeca16695319772ede528287bc63cc4a66f86" exitCode=0 Oct 11 09:15:01 crc kubenswrapper[5055]: I1011 09:15:01.475709 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" event={"ID":"5332f844-86cd-48c6-b808-5fdc280bcdb2","Type":"ContainerDied","Data":"1d695ea2cc327a0677648a3d51aeeca16695319772ede528287bc63cc4a66f86"} Oct 11 09:15:01 crc kubenswrapper[5055]: I1011 09:15:01.475746 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" event={"ID":"5332f844-86cd-48c6-b808-5fdc280bcdb2","Type":"ContainerStarted","Data":"984bafe8fe37cbe50dd43f111f1689c27862e74fb1f8cfb0b75a61d6c350fd08"} Oct 11 09:15:02 crc kubenswrapper[5055]: I1011 09:15:02.856134 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:02 crc kubenswrapper[5055]: I1011 09:15:02.995327 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:15:02 crc kubenswrapper[5055]: E1011 09:15:02.995961 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.012513 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume\") pod \"5332f844-86cd-48c6-b808-5fdc280bcdb2\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.012630 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m67j\" (UniqueName: \"kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j\") pod \"5332f844-86cd-48c6-b808-5fdc280bcdb2\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.012756 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume\") pod \"5332f844-86cd-48c6-b808-5fdc280bcdb2\" (UID: \"5332f844-86cd-48c6-b808-5fdc280bcdb2\") " Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.013286 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume" (OuterVolumeSpecName: "config-volume") pod "5332f844-86cd-48c6-b808-5fdc280bcdb2" (UID: "5332f844-86cd-48c6-b808-5fdc280bcdb2"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.018162 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5332f844-86cd-48c6-b808-5fdc280bcdb2" (UID: "5332f844-86cd-48c6-b808-5fdc280bcdb2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.037746 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j" (OuterVolumeSpecName: "kube-api-access-8m67j") pod "5332f844-86cd-48c6-b808-5fdc280bcdb2" (UID: "5332f844-86cd-48c6-b808-5fdc280bcdb2"). InnerVolumeSpecName "kube-api-access-8m67j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.113812 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m67j\" (UniqueName: \"kubernetes.io/projected/5332f844-86cd-48c6-b808-5fdc280bcdb2-kube-api-access-8m67j\") on node \"crc\" DevicePath \"\"" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.113843 5055 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5332f844-86cd-48c6-b808-5fdc280bcdb2-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.113853 5055 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5332f844-86cd-48c6-b808-5fdc280bcdb2-config-volume\") on node \"crc\" DevicePath \"\"" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.495156 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" event={"ID":"5332f844-86cd-48c6-b808-5fdc280bcdb2","Type":"ContainerDied","Data":"984bafe8fe37cbe50dd43f111f1689c27862e74fb1f8cfb0b75a61d6c350fd08"} Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.495218 5055 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="984bafe8fe37cbe50dd43f111f1689c27862e74fb1f8cfb0b75a61d6c350fd08" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.495236 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29336235-799vq" Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.945193 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln"] Oct 11 09:15:03 crc kubenswrapper[5055]: I1011 09:15:03.952022 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29336190-b8xln"] Oct 11 09:15:05 crc kubenswrapper[5055]: I1011 09:15:05.000998 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="295550b8-f3fb-4687-a147-c3ee81bb20a3" path="/var/lib/kubelet/pods/295550b8-f3fb-4687-a147-c3ee81bb20a3/volumes" Oct 11 09:15:16 crc kubenswrapper[5055]: I1011 09:15:16.996304 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:15:16 crc kubenswrapper[5055]: E1011 09:15:16.996881 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:15:23 crc kubenswrapper[5055]: I1011 09:15:23.660071 5055 generic.go:334] "Generic (PLEG): container finished" podID="280caae4-c1b8-498c-949e-5d852e525739" containerID="f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb" exitCode=0 Oct 11 09:15:23 crc kubenswrapper[5055]: I1011 09:15:23.660186 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" event={"ID":"280caae4-c1b8-498c-949e-5d852e525739","Type":"ContainerDied","Data":"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb"} Oct 11 09:15:23 crc kubenswrapper[5055]: I1011 09:15:23.661400 5055 scope.go:117] "RemoveContainer" containerID="f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb" Oct 11 09:15:24 crc kubenswrapper[5055]: I1011 09:15:24.235846 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7hvb8_must-gather-sbbg6_280caae4-c1b8-498c-949e-5d852e525739/gather/0.log" Oct 11 09:15:29 crc kubenswrapper[5055]: I1011 09:15:29.994326 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:15:29 crc kubenswrapper[5055]: E1011 09:15:29.995209 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.133543 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7hvb8/must-gather-sbbg6"] Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.133866 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="copy" 
containerID="cri-o://2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846" gracePeriod=2 Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.139960 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7hvb8/must-gather-sbbg6"] Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.495332 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7hvb8_must-gather-sbbg6_280caae4-c1b8-498c-949e-5d852e525739/copy/0.log" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.496000 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.538634 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x97r2\" (UniqueName: \"kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2\") pod \"280caae4-c1b8-498c-949e-5d852e525739\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.538821 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output\") pod \"280caae4-c1b8-498c-949e-5d852e525739\" (UID: \"280caae4-c1b8-498c-949e-5d852e525739\") " Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.545979 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2" (OuterVolumeSpecName: "kube-api-access-x97r2") pod "280caae4-c1b8-498c-949e-5d852e525739" (UID: "280caae4-c1b8-498c-949e-5d852e525739"). InnerVolumeSpecName "kube-api-access-x97r2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.621295 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "280caae4-c1b8-498c-949e-5d852e525739" (UID: "280caae4-c1b8-498c-949e-5d852e525739"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.641039 5055 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/280caae4-c1b8-498c-949e-5d852e525739-must-gather-output\") on node \"crc\" DevicePath \"\"" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.641096 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x97r2\" (UniqueName: \"kubernetes.io/projected/280caae4-c1b8-498c-949e-5d852e525739-kube-api-access-x97r2\") on node \"crc\" DevicePath \"\"" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.753207 5055 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7hvb8_must-gather-sbbg6_280caae4-c1b8-498c-949e-5d852e525739/copy/0.log" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.753645 5055 generic.go:334] "Generic (PLEG): container finished" podID="280caae4-c1b8-498c-949e-5d852e525739" containerID="2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846" exitCode=143 Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.753734 5055 scope.go:117] "RemoveContainer" containerID="2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.753755 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7hvb8/must-gather-sbbg6" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.783960 5055 scope.go:117] "RemoveContainer" containerID="f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.864205 5055 scope.go:117] "RemoveContainer" containerID="2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846" Oct 11 09:15:31 crc kubenswrapper[5055]: E1011 09:15:31.865679 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846\": container with ID starting with 2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846 not found: ID does not exist" containerID="2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.865733 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846"} err="failed to get container status \"2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846\": rpc error: code = NotFound desc = could not find container \"2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846\": container with ID starting with 2d74c51521c254870b9461129dbcf69399adc1f702b85c458336f76b574ec846 not found: ID does not exist" Oct 11 09:15:31 crc kubenswrapper[5055]: I1011 09:15:31.865793 5055 scope.go:117] "RemoveContainer" containerID="f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb" Oct 11 09:15:31 crc kubenswrapper[5055]: E1011 09:15:31.866333 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb\": container with ID starting with f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb not found: ID does not exist" containerID="f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb" Oct 11 09:15:31 crc 
kubenswrapper[5055]: I1011 09:15:31.866385 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb"} err="failed to get container status \"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb\": rpc error: code = NotFound desc = could not find container \"f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb\": container with ID starting with f34fbdd3ae8ebbb3d7bb61159a2b49d89531275fb490875a2e60bb1a2968dabb not found: ID does not exist" Oct 11 09:15:33 crc kubenswrapper[5055]: I1011 09:15:33.005667 5055 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="280caae4-c1b8-498c-949e-5d852e525739" path="/var/lib/kubelet/pods/280caae4-c1b8-498c-949e-5d852e525739/volumes" Oct 11 09:15:43 crc kubenswrapper[5055]: I1011 09:15:43.994391 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:15:43 crc kubenswrapper[5055]: E1011 09:15:43.995565 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:15:55 crc kubenswrapper[5055]: I1011 09:15:55.651287 5055 scope.go:117] "RemoveContainer" containerID="eb76c84a0983f26da99f364e5206741b7b61a65d6e7430bdbec22440b45d8565" Oct 11 09:15:57 crc kubenswrapper[5055]: I1011 09:15:57.993948 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:15:57 crc kubenswrapper[5055]: E1011 09:15:57.994864 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:16:08 crc kubenswrapper[5055]: I1011 09:16:08.993113 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:16:08 crc kubenswrapper[5055]: E1011 09:16:08.993852 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:16:22 crc kubenswrapper[5055]: I1011 09:16:22.993618 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:16:22 crc kubenswrapper[5055]: E1011 09:16:22.994317 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
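
Note on the NotFound errors at 09:15:31: after the must-gather pod's DELETE, the kubelet kills the copy container with a 2s grace period, and its subsequent RemoveContainer races the runtime — by the time it asks for the container status, CRI-O has already deleted it and the call returns gRPC NotFound, which the kubelet logs ("DeleteContainer returned error") and then drops, since absence is the desired end state. A Go sketch of that idempotent-cleanup pattern, not kubelet's actual code:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the CRI runtime's gRPC NotFound
// ("could not find container ...") seen in the log above.
var errNotFound = errors.New("container not found")

// removeContainer pretends the runtime already deleted the container,
// which is exactly the race recorded at 09:15:31.
func removeContainer(id string) error {
	return fmt.Errorf("get status %q: %w", id, errNotFound)
}

func cleanup(id string) error {
	err := removeContainer(id)
	if errors.Is(err, errNotFound) {
		// Idempotent: the container being gone is the goal, so the
		// error is logged and swallowed rather than retried.
		fmt.Printf("container %s already removed; nothing to do\n", id)
		return nil
	}
	return err
}

func main() {
	_ = cleanup("2d74c51521c2")
}
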
pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:16:35 crc kubenswrapper[5055]: I1011 09:16:35.994185 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:16:35 crc kubenswrapper[5055]: E1011 09:16:35.994801 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:16:47 crc kubenswrapper[5055]: I1011 09:16:47.993718 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:16:47 crc kubenswrapper[5055]: E1011 09:16:47.994510 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.423841 5055 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:16:57 crc kubenswrapper[5055]: E1011 09:16:57.424671 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="copy" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.424687 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="copy" Oct 11 09:16:57 crc kubenswrapper[5055]: E1011 09:16:57.424704 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="gather" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.424712 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="gather" Oct 11 09:16:57 crc kubenswrapper[5055]: E1011 09:16:57.424749 5055 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5332f844-86cd-48c6-b808-5fdc280bcdb2" containerName="collect-profiles" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.424757 5055 state_mem.go:107] "Deleted CPUSet assignment" podUID="5332f844-86cd-48c6-b808-5fdc280bcdb2" containerName="collect-profiles" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.424988 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="copy" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.425008 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="280caae4-c1b8-498c-949e-5d852e525739" containerName="gather" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.425033 5055 memory_manager.go:354] "RemoveStaleState removing state" podUID="5332f844-86cd-48c6-b808-5fdc280bcdb2" containerName="collect-profiles" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.426280 5055 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.434951 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.519391 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.519442 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fsjb\" (UniqueName: \"kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.519470 5055 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.620803 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.621127 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fsjb\" (UniqueName: \"kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.621255 5055 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.621364 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.621693 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 
09:16:57.654348 5055 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fsjb\" (UniqueName: \"kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb\") pod \"community-operators-bnbpk\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:57 crc kubenswrapper[5055]: I1011 09:16:57.763642 5055 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:16:58 crc kubenswrapper[5055]: I1011 09:16:58.252171 5055 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:16:58 crc kubenswrapper[5055]: I1011 09:16:58.470651 5055 generic.go:334] "Generic (PLEG): container finished" podID="558a1a82-c291-43e9-b87b-47b71c7a1026" containerID="58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e" exitCode=0 Oct 11 09:16:58 crc kubenswrapper[5055]: I1011 09:16:58.470691 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerDied","Data":"58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e"} Oct 11 09:16:58 crc kubenswrapper[5055]: I1011 09:16:58.470982 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerStarted","Data":"f4cfa95743c3a87d19afe7ca474f0f4442f9e283f5e49a198f823d7f7b284091"} Oct 11 09:16:59 crc kubenswrapper[5055]: I1011 09:16:59.479673 5055 generic.go:334] "Generic (PLEG): container finished" podID="558a1a82-c291-43e9-b87b-47b71c7a1026" containerID="c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed" exitCode=0 Oct 11 09:16:59 crc kubenswrapper[5055]: I1011 09:16:59.479774 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerDied","Data":"c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed"} Oct 11 09:17:00 crc kubenswrapper[5055]: I1011 09:17:00.488461 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerStarted","Data":"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547"} Oct 11 09:17:00 crc kubenswrapper[5055]: I1011 09:17:00.517068 5055 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bnbpk" podStartSLOduration=2.122682374 podStartE2EDuration="3.517039781s" podCreationTimestamp="2025-10-11 09:16:57 +0000 UTC" firstStartedPulling="2025-10-11 09:16:58.471800079 +0000 UTC m=+8602.246073886" lastFinishedPulling="2025-10-11 09:16:59.866157486 +0000 UTC m=+8603.640431293" observedRunningTime="2025-10-11 09:17:00.516813064 +0000 UTC m=+8604.291086871" watchObservedRunningTime="2025-10-11 09:17:00.517039781 +0000 UTC m=+8604.291313628" Oct 11 09:17:01 crc kubenswrapper[5055]: I1011 09:17:01.994060 5055 scope.go:117] "RemoveContainer" containerID="3691df18dead86a00ed008d09d0cf8dd3aa7cb62cdd8376015624e5f056187db" Oct 11 09:17:01 crc kubenswrapper[5055]: E1011 09:17:01.994398 5055 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
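
Note on the pod_startup_latency_tracker record for community-operators-bnbpk: its numbers can be re-derived from its own timestamps. podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (3.517039781s), and podStartSLOduration subtracts the image-pull window, lastFinishedPulling minus firstStartedPulling (1.394357407s), leaving 2.122682374s. The same arithmetic in Go, with the timestamps copied verbatim from the record:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-10-11 09:16:57 +0000 UTC")
	firstPull := parse("2025-10-11 09:16:58.471800079 +0000 UTC")
	lastPull := parse("2025-10-11 09:16:59.866157486 +0000 UTC")
	running := parse("2025-10-11 09:17:00.517039781 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration: 3.517039781s
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: 2.122682374s
	fmt.Println(e2e, slo)
}
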
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-qtqvf_openshift-machine-config-operator(46789346-5a88-43a0-ad63-b530185c8ca1)\"" pod="openshift-machine-config-operator/machine-config-daemon-qtqvf" podUID="46789346-5a88-43a0-ad63-b530185c8ca1" Oct 11 09:17:07 crc kubenswrapper[5055]: I1011 09:17:07.764217 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:07 crc kubenswrapper[5055]: I1011 09:17:07.764642 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:07 crc kubenswrapper[5055]: I1011 09:17:07.933318 5055 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:08 crc kubenswrapper[5055]: I1011 09:17:08.646465 5055 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:08 crc kubenswrapper[5055]: I1011 09:17:08.697607 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:17:10 crc kubenswrapper[5055]: I1011 09:17:10.599849 5055 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bnbpk" podUID="558a1a82-c291-43e9-b87b-47b71c7a1026" containerName="registry-server" containerID="cri-o://ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547" gracePeriod=2 Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.063620 5055 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.250025 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content\") pod \"558a1a82-c291-43e9-b87b-47b71c7a1026\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.250190 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fsjb\" (UniqueName: \"kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb\") pod \"558a1a82-c291-43e9-b87b-47b71c7a1026\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.250246 5055 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities\") pod \"558a1a82-c291-43e9-b87b-47b71c7a1026\" (UID: \"558a1a82-c291-43e9-b87b-47b71c7a1026\") " Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.251231 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities" (OuterVolumeSpecName: "utilities") pod "558a1a82-c291-43e9-b87b-47b71c7a1026" (UID: "558a1a82-c291-43e9-b87b-47b71c7a1026"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.258980 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb" (OuterVolumeSpecName: "kube-api-access-7fsjb") pod "558a1a82-c291-43e9-b87b-47b71c7a1026" (UID: "558a1a82-c291-43e9-b87b-47b71c7a1026"). InnerVolumeSpecName "kube-api-access-7fsjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.307957 5055 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "558a1a82-c291-43e9-b87b-47b71c7a1026" (UID: "558a1a82-c291-43e9-b87b-47b71c7a1026"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.352004 5055 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.352054 5055 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fsjb\" (UniqueName: \"kubernetes.io/projected/558a1a82-c291-43e9-b87b-47b71c7a1026-kube-api-access-7fsjb\") on node \"crc\" DevicePath \"\"" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.352072 5055 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/558a1a82-c291-43e9-b87b-47b71c7a1026-utilities\") on node \"crc\" DevicePath \"\"" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.618321 5055 generic.go:334] "Generic (PLEG): container finished" podID="558a1a82-c291-43e9-b87b-47b71c7a1026" containerID="ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547" exitCode=0 Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.618386 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerDied","Data":"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547"} Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.618658 5055 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bnbpk" event={"ID":"558a1a82-c291-43e9-b87b-47b71c7a1026","Type":"ContainerDied","Data":"f4cfa95743c3a87d19afe7ca474f0f4442f9e283f5e49a198f823d7f7b284091"} Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.618684 5055 scope.go:117] "RemoveContainer" containerID="ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.618492 5055 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bnbpk" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.637353 5055 scope.go:117] "RemoveContainer" containerID="c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.659103 5055 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.665886 5055 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bnbpk"] Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.684407 5055 scope.go:117] "RemoveContainer" containerID="58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.702365 5055 scope.go:117] "RemoveContainer" containerID="ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547" Oct 11 09:17:11 crc kubenswrapper[5055]: E1011 09:17:11.702928 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547\": container with ID starting with ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547 not found: ID does not exist" containerID="ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.702980 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547"} err="failed to get container status \"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547\": rpc error: code = NotFound desc = could not find container \"ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547\": container with ID starting with ada303930669fb4cb2432dd4a3e56eb2f2857d433c605ab1423091e5c9419547 not found: ID does not exist" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.703012 5055 scope.go:117] "RemoveContainer" containerID="c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed" Oct 11 09:17:11 crc kubenswrapper[5055]: E1011 09:17:11.703492 5055 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed\": container with ID starting with c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed not found: ID does not exist" containerID="c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.703594 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed"} err="failed to get container status \"c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed\": rpc error: code = NotFound desc = could not find container \"c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed\": container with ID starting with c6d061231de29caa73f1150ee44a1cf3d6edd2a9c19439a741e6adf1138504ed not found: ID does not exist" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.703722 5055 scope.go:117] "RemoveContainer" containerID="58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e" Oct 11 09:17:11 crc kubenswrapper[5055]: E1011 09:17:11.704115 5055 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e\": container with ID starting with 58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e not found: ID does not exist" containerID="58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e" Oct 11 09:17:11 crc kubenswrapper[5055]: I1011 09:17:11.704452 5055 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e"} err="failed to get container status \"58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e\": rpc error: code = NotFound desc = could not find container \"58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e\": container with ID starting with 58ec2ddec3685166504f32ce222358cc6f25e50e581b6acaee4e16ca6506f44e not found: ID does not exist" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515072420237024447 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015072420240017356 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015072376457016525 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015072376457015475 5ustar corecore